/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *	Initial versions: Eric Youngdale (eric@andante.org).
 *			  Based upon conversations with large numbers
 *			  of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR		ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE		2

struct scsi_host_sg_pool {
	size_t		size;
	char		*name;
	struct kmem_cache	*slab;
	mempool_t	*pool;
};

#define SP(x) { x, "sgpool-" #x }
static struct scsi_host_sg_pool scsi_sg_pools[] = {
	SP(8),
	SP(16),
	SP(32),
	SP(64),
	SP(128),
};
#undef SP

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:	scsi_unprep_request()
 *
 * Purpose:	Remove all preparation done for a request, including its
 *		associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:	req	- request to unprepare
 *
 * Lock status:	Assumed that no locks are held upon entry.
 *
 * Returns:	Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
	struct scsi_cmnd *cmd = req->special;

	req->cmd_flags &= ~REQ_DONTPREP;
	req->special = NULL;

	scsi_put_command(cmd);
}

/*
 * Function:	scsi_queue_insert()
 *
 * Purpose:	Insert a command in the midlevel queue.
 *
 * Arguments:	cmd    - command that we are adding to queue.
 *		reason - why we are inserting command to queue.
 *
 * Lock status:	Assumed that lock is not held upon entry.
 *
 * Returns:	Nothing.
 *
 * Notes:	We do this for one of two cases.  Either the host is busy
 *		and it cannot accept any more commands for the time being,
 *		or the device returned QUEUE_FULL and can accept no more
 *		commands.
 * Notes:	This could be called either from an interrupt context or a
 *		normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
	struct Scsi_Host *host = cmd->device->host;
	struct scsi_device *device = cmd->device;
	struct request_queue *q = device->request_queue;
	unsigned long flags;

	SCSI_LOG_MLQUEUE(1,
		 printk("Inserting command %p into mlqueue\n", cmd));

	/*
	 * Set the appropriate busy bit for the device/host.
	 *
	 * If the host/device isn't busy, assume that something actually
	 * completed, and that we should be able to queue a command now.
	 *
	 * Note that the prior mid-layer assumption that any host could
	 * always queue at least one command is now broken.  The mid-layer
	 * will implement a user specifiable stall (see
	 * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
	 * if a command is requeued with no other commands outstanding
	 * either for the device or for the host.
	 */
	if (reason == SCSI_MLQUEUE_HOST_BUSY)
		host->host_blocked = host->max_host_blocked;
	else if (reason == SCSI_MLQUEUE_DEVICE_BUSY)
		device->device_blocked = device->max_device_blocked;

	/*
	 * Decrement the counters, since these commands are no longer
	 * active on the host/device.
	 */
	scsi_device_unbusy(device);

	/*
	 * Requeue this command.  It will go before all other commands
	 * that are already in the queue.
	 *
	 * NOTE: there is magic here about the way the queue is plugged if
	 * we have no outstanding commands.
	 *
	 * Although we *don't* plug the queue, we call the request
	 * function.  The SCSI request function detects the blocked condition
	 * and plugs the queue appropriately.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, cmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);

	return 0;
}

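/*
 * Illustrative sketch (annotation, not part of the original file):
 * scsi_queue_insert() is what ultimately services a low-level driver
 * that cannot take a command right now.  A hypothetical LLD
 * queuecommand handler might bounce work back to the midlayer like
 * this; foo_hw_queue_full() is an invented helper.
 *
 *	static int foo_queuecommand(struct scsi_cmnd *cmd,
 *				    void (*done)(struct scsi_cmnd *))
 *	{
 *		if (foo_hw_queue_full(cmd->device->host))
 *			return SCSI_MLQUEUE_HOST_BUSY; // midlayer requeues
 *		...
 *		return 0;
 *	}
 */
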
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @data_direction: data direction
 * @buffer:	data buffer
 * @bufflen:	len of buffer
 * @sense:	optional sense buffer
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 **/
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
		 int data_direction, void *buffer, unsigned bufflen,
		 unsigned char *sense, int timeout, int retries, int flags)
{
	struct request *req;
	int write = (data_direction == DMA_TO_DEVICE);
	int ret = DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

	if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
				       buffer, bufflen, __GFP_WAIT))
		goto out;

	req->cmd_len = COMMAND_SIZE(cmd[0]);
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sense;
	req->sense_len = 0;
	req->retries = retries;
	req->timeout = timeout;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

	/*
	 * head injection *required* here otherwise quiesce won't work
	 */
	blk_execute_rq(req->q, NULL, req, 1);

	ret = req->errors;
 out:
	blk_put_request(req);

	return ret;
}
EXPORT_SYMBOL(scsi_execute);


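/*
 * Usage sketch (annotation, not part of the original file): issuing a
 * TEST UNIT READY through scsi_execute().  The 30 second timeout is
 * passed HZ-scaled, as in-tree callers do; sdev is assumed to be a
 * valid, referenced scsi_device.
 *
 *	unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *	int result;
 *
 *	result = scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, NULL,
 *			      30 * HZ, 3, 0);
 *	// result packs driver/host/status bytes, e.g. test
 *	// status_byte(result) for CHECK_CONDITION
 */
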
int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
		     int data_direction, void *buffer, unsigned bufflen,
		     struct scsi_sense_hdr *sshdr, int timeout, int retries)
{
	char *sense = NULL;
	int result;

	if (sshdr) {
		sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
		if (!sense)
			return DRIVER_ERROR << 24;
	}
	result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
			      sense, timeout, retries, 0);
	if (sshdr)
		scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

	kfree(sense);
	return result;
}
EXPORT_SYMBOL(scsi_execute_req);

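/*
 * Usage sketch (annotation, not part of the original file): the
 * scsi_execute_req() variant allocates a sense buffer and normalizes it
 * into a scsi_sense_hdr for the caller.  cmd is an assumed CDB array,
 * buf/len an assumed data buffer.
 *
 *	struct scsi_sense_hdr sshdr;
 *	int result;
 *
 *	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buf, len,
 *				  &sshdr, 30 * HZ, 3);
 *	if (result && scsi_sense_valid(&sshdr))
 *		...	// inspect sshdr.sense_key, asc and ascq
 */
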
struct scsi_io_context {
	void *data;
	void (*done)(void *data, char *sense, int result, int resid);
	char sense[SCSI_SENSE_BUFFERSIZE];
};

static struct kmem_cache *scsi_io_context_cache;

static void scsi_end_async(struct request *req, int uptodate)
{
	struct scsi_io_context *sioc = req->end_io_data;

	if (sioc->done)
		sioc->done(sioc->data, sioc->sense, req->errors, req->data_len);

	kmem_cache_free(scsi_io_context_cache, sioc);
	__blk_put_request(req->q, req);
}

static int scsi_merge_bio(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
	if (rq_data_dir(rq) == WRITE)
		bio->bi_rw |= (1 << BIO_RW);
	blk_queue_bounce(q, &bio);

	return blk_rq_append_bio(q, rq, bio);
}

static void scsi_bi_endio(struct bio *bio, int error)
{
	bio_put(bio);
}

/**
 * scsi_req_map_sg - map a scatterlist into a request
 * @rq:		request to fill
 * @sgl:	scatterlist
 * @nsegs:	number of elements
 * @bufflen:	len of buffer
 * @gfp:	memory allocation flags
 *
 * scsi_req_map_sg maps a scatterlist into a request so that the
 * request can be sent to the block layer. We do not trust the scatterlist
 * sent to us, as some ULDs use that struct to only organize the pages.
 */
static int scsi_req_map_sg(struct request *rq, struct scatterlist *sgl,
			   int nsegs, unsigned bufflen, gfp_t gfp)
{
	struct request_queue *q = rq->q;
	int nr_pages = (bufflen + sgl[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned int data_len = bufflen, len, bytes, off;
	struct scatterlist *sg;
	struct page *page;
	struct bio *bio = NULL;
	int i, err, nr_vecs = 0;

	for_each_sg(sgl, sg, nsegs, i) {
		page = sg->page;
		off = sg->offset;
		len = sg->length;
		data_len += len;

		while (len > 0 && data_len > 0) {
			/*
			 * sg sends a scatterlist that is larger than
			 * the data_len it wants transferred for certain
			 * IO sizes
			 */
			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
			bytes = min(bytes, data_len);

			if (!bio) {
				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
				nr_pages -= nr_vecs;

				bio = bio_alloc(gfp, nr_vecs);
				if (!bio) {
					err = -ENOMEM;
					goto free_bios;
				}
				bio->bi_end_io = scsi_bi_endio;
			}

			if (bio_add_pc_page(q, bio, page, bytes, off) !=
			    bytes) {
				bio_put(bio);
				err = -EINVAL;
				goto free_bios;
			}

			if (bio->bi_vcnt >= nr_vecs) {
				err = scsi_merge_bio(rq, bio);
				if (err) {
					bio_endio(bio, 0);
					goto free_bios;
				}
				bio = NULL;
			}

			page++;
			len -= bytes;
			data_len -= bytes;
			off = 0;
		}
	}

	rq->buffer = rq->data = NULL;
	rq->data_len = bufflen;
	return 0;

free_bios:
	while ((bio = rq->bio) != NULL) {
		rq->bio = bio->bi_next;
		/*
		 * call endio instead of bio_put in case it was bounced
		 */
		bio_endio(bio, 0);
	}

	return err;
}

/**
 * scsi_execute_async - insert request
 * @sdev:	scsi device
 * @cmd:	scsi command
 * @cmd_len:	length of scsi cdb
 * @data_direction: data direction
 * @buffer:	data buffer (this can be a kernel buffer or scatterlist)
 * @bufflen:	len of buffer
 * @use_sg:	if buffer is a scatterlist this is the number of elements
 * @timeout:	request timeout in seconds
 * @retries:	number of times to retry request
 * @flags:	or into request flags
 **/
int scsi_execute_async(struct scsi_device *sdev, const unsigned char *cmd,
		       int cmd_len, int data_direction, void *buffer,
		       unsigned bufflen, int use_sg, int timeout, int retries,
		       void *privdata, void (*done)(void *, char *, int, int),
		       gfp_t gfp)
{
	struct request *req;
	struct scsi_io_context *sioc;
	int err = 0;
	int write = (data_direction == DMA_TO_DEVICE);

	sioc = kmem_cache_zalloc(scsi_io_context_cache, gfp);
	if (!sioc)
		return DRIVER_ERROR << 24;

	req = blk_get_request(sdev->request_queue, write, gfp);
	if (!req)
		goto free_sense;
	req->cmd_type = REQ_TYPE_BLOCK_PC;
	req->cmd_flags |= REQ_QUIET;

	if (use_sg)
		err = scsi_req_map_sg(req, buffer, use_sg, bufflen, gfp);
	else if (bufflen)
		err = blk_rq_map_kern(req->q, req, buffer, bufflen, gfp);

	if (err)
		goto free_req;

	req->cmd_len = cmd_len;
	memset(req->cmd, 0, BLK_MAX_CDB); /* ATAPI hates garbage after CDB */
	memcpy(req->cmd, cmd, req->cmd_len);
	req->sense = sioc->sense;
	req->sense_len = 0;
	req->timeout = timeout;
	req->retries = retries;
	req->end_io_data = sioc;

	sioc->data = privdata;
	sioc->done = done;

	blk_execute_rq_nowait(req->q, NULL, req, 1, scsi_end_async);
	return 0;

free_req:
	blk_put_request(req);
free_sense:
	kmem_cache_free(scsi_io_context_cache, sioc);
	return DRIVER_ERROR << 24;
}
EXPORT_SYMBOL_GPL(scsi_execute_async);

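/*
 * Usage sketch (annotation, not part of the original file):
 * scsi_execute_async() returns as soon as the request is queued; the
 * done callback runs at request completion.  my_done and my_ctx are
 * invented names.
 *
 *	static void my_done(void *data, char *sense, int result, int resid)
 *	{
 *		struct my_ctx *ctx = data;
 *		// result carries req->errors, sense the raw sense buffer
 *		complete(&ctx->finished);
 *	}
 *
 *	err = scsi_execute_async(sdev, cdb, cdb_len, DMA_FROM_DEVICE,
 *				 buf, len, 0, 30 * HZ, 3, ctx, my_done,
 *				 GFP_KERNEL);
 */
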
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd	- command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
	cmd->serial_number = 0;
	cmd->resid = 0;
	memset(cmd->sense_buffer, 0, sizeof cmd->sense_buffer);
	if (cmd->cmd_len == 0)
		cmd->cmd_len = COMMAND_SIZE(cmd->cmnd[0]);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	shost->host_busy--;
	if (unlikely(scsi_host_in_recovery(shost) &&
		     (shost->host_failed || shost->host_eh_scheduled)))
		scsi_eh_wakeup(shost);
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);
	sdev->device_busy--;
	spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
	struct Scsi_Host *shost = current_sdev->host;
	struct scsi_device *sdev, *tmp;
	struct scsi_target *starget = scsi_target(current_sdev);
	unsigned long flags;

	spin_lock_irqsave(shost->host_lock, flags);
	starget->starget_sdev_user = NULL;
	spin_unlock_irqrestore(shost->host_lock, flags);

	/*
	 * Call blk_run_queue for all LUNs on the target, starting with
	 * current_sdev. We race with others (to set starget_sdev_user),
	 * but in most cases, we will be first. Ideally, each LU on the
	 * target would get some limited time or requests on the target.
	 */
	blk_run_queue(current_sdev->request_queue);

	spin_lock_irqsave(shost->host_lock, flags);
	if (starget->starget_sdev_user)
		goto out;
	list_for_each_entry_safe(sdev, tmp, &starget->devices,
			same_target_siblings) {
		if (sdev == current_sdev)
			continue;
		if (scsi_device_get(sdev))
			continue;

		spin_unlock_irqrestore(shost->host_lock, flags);
		blk_run_queue(sdev->request_queue);
		spin_lock_irqsave(shost->host_lock, flags);

		scsi_device_put(sdev);
	}
 out:
	spin_unlock_irqrestore(shost->host_lock, flags);
}

/*
 * Function:	scsi_run_queue()
 *
 * Purpose:	Select a proper request queue to serve next
 *
 * Arguments:	q	- last request's queue
 *
 * Returns:	Nothing
 *
 * Notes:	The previous command was completely finished, start
 *		a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
	struct scsi_device *sdev = q->queuedata;
	struct Scsi_Host *shost = sdev->host;
	unsigned long flags;

	if (sdev->single_lun)
		scsi_single_lun_run(sdev);

	spin_lock_irqsave(shost->host_lock, flags);
	while (!list_empty(&shost->starved_list) &&
	       !shost->host_blocked && !shost->host_self_blocked &&
	       !((shost->can_queue > 0) &&
		 (shost->host_busy >= shost->can_queue))) {
		/*
		 * As long as shost is accepting commands and we have
		 * starved queues, call blk_run_queue. scsi_request_fn
		 * drops the queue_lock and can add us back to the
		 * starved_list.
		 *
		 * host_lock protects the starved_list and starved_entry.
		 * scsi_request_fn must get the host_lock before checking
		 * or modifying starved_list or starved_entry.
		 */
		sdev = list_entry(shost->starved_list.next,
				  struct scsi_device, starved_entry);
		list_del_init(&sdev->starved_entry);
		spin_unlock_irqrestore(shost->host_lock, flags);


		if (test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
		    !test_and_set_bit(QUEUE_FLAG_REENTER,
				      &sdev->request_queue->queue_flags)) {
			blk_run_queue(sdev->request_queue);
			clear_bit(QUEUE_FLAG_REENTER,
				  &sdev->request_queue->queue_flags);
		} else
			blk_run_queue(sdev->request_queue);

		spin_lock_irqsave(shost->host_lock, flags);
		if (unlikely(!list_empty(&sdev->starved_entry)))
			/*
			 * sdev lost a race, and was put back on the
			 * starved list. This is unlikely but without this
			 * in theory we could loop forever.
			 */
			break;
	}
	spin_unlock_irqrestore(shost->host_lock, flags);

	blk_run_queue(q);
}

/*
 * Function:	scsi_requeue_command()
 *
 * Purpose:	Handle post-processing of completed commands.
 *
 * Arguments:	q	- queue to operate on
 *		cmd	- command that may need to be requeued.
 *
 * Returns:	Nothing
 *
 * Notes:	After command completion, there may be blocks left
 *		over which weren't finished by the previous command;
 *		this can be for a number of reasons - the main one is
 *		I/O errors in the middle of the request, in which case
 *		we need to request the blocks that come after the bad
 *		sector.
 * Notes:	Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	unsigned long flags;

	scsi_unprep_request(req);
	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, req);
	spin_unlock_irqrestore(q->queue_lock, flags);

	scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
	struct scsi_device *sdev = cmd->device;
	struct request_queue *q = sdev->request_queue;

	/* need to hold a reference on the device before we let go of the cmd */
	get_device(&sdev->sdev_gendev);

	scsi_put_command(cmd);
	scsi_run_queue(q);

	/* ok to remove device now */
	put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost)
		scsi_run_queue(sdev->request_queue);
}

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *		of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd	 - command that is complete.
 *              uptodate - 1 if I/O indicates success, <= 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *		requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *		We are guaranteeing that the request queue will be goosed
 *		at some point during this call.
 * Notes:	If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int uptodate,
					  int bytes, int requeue)
{
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	unsigned long flags;

	/*
	 * If there are blocks left over at the end, set up the command
	 * to queue the remainder of them.
	 */
	if (end_that_request_chunk(req, uptodate, bytes)) {
		int leftover = (req->hard_nr_sectors << 9);

		if (blk_pc_request(req))
			leftover = req->data_len;

		/* kill remainder if no retries */
		if (!uptodate && blk_noretry_request(req))
			end_that_request_chunk(req, 0, leftover);
		else {
			if (requeue) {
				/*
				 * Bleah.  Leftovers again.  Stick the
				 * leftovers in the front of the
				 * queue, and goose the queue again.
				 */
				scsi_requeue_command(q, cmd);
				cmd = NULL;
			}
			return cmd;
		}
	}

	add_disk_randomness(req->rq_disk);

	spin_lock_irqsave(q->queue_lock, flags);
	if (blk_rq_tagged(req))
		blk_queue_end_tag(q, req);
	end_that_request_last(req, uptodate);
	spin_unlock_irqrestore(q->queue_lock, flags);

	/*
	 * This will goose the queue request function at the end, so we don't
	 * need to worry about launching another command.
	 */
	scsi_next_command(cmd);
	return NULL;
}

/*
 * The maximum number of SG segments that we will put inside a scatterlist
 * (unless chaining is used). Should ideally fit inside a single page, to
 * avoid a higher order allocation.
 */
#define SCSI_MAX_SG_SEGMENTS	128

/*
 * Like SCSI_MAX_SG_SEGMENTS, but for archs that have sg chaining. This limit
 * is totally arbitrary, a setting of 2048 will get you at least 8mb ios.
 */
#define SCSI_MAX_SG_CHAIN_SEGMENTS	2048

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
	unsigned int index;

	switch (nents) {
	case 1 ... 8:
		index = 0;
		break;
	case 9 ... 16:
		index = 1;
		break;
	case 17 ... 32:
		index = 2;
		break;
	case 33 ... 64:
		index = 3;
		break;
	case 65 ... SCSI_MAX_SG_SEGMENTS:
		index = 4;
		break;
	default:
		printk(KERN_ERR "scsi: bad segment count=%d\n", nents);
		BUG();
	}

	return index;
}

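/*
 * Worked example (annotation, not part of the original file): with the
 * pools defined above, scsi_sgtable_index(48) hits the "33 ... 64" case
 * and returns 3, so a 48-entry table is served from the sgpool-64
 * mempool; anything above SCSI_MAX_SG_SEGMENTS trips the BUG().
 */
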
struct scatterlist *scsi_alloc_sgtable(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
	struct scsi_host_sg_pool *sgp;
	struct scatterlist *sgl, *prev, *ret;
	unsigned int index;
	int this, left;

	BUG_ON(!cmd->use_sg);

	left = cmd->use_sg;
	ret = prev = NULL;
	do {
		this = left;
		if (this > SCSI_MAX_SG_SEGMENTS) {
			this = SCSI_MAX_SG_SEGMENTS - 1;
			index = SG_MEMPOOL_NR - 1;
		} else
			index = scsi_sgtable_index(this);

		left -= this;

		sgp = scsi_sg_pools + index;

		sgl = mempool_alloc(sgp->pool, gfp_mask);
		if (unlikely(!sgl))
			goto enomem;

		memset(sgl, 0, sizeof(*sgl) * sgp->size);

		/*
		 * first loop through, set initial index and return value
		 */
		if (!ret) {
			cmd->sglist_len = index;
			ret = sgl;
		}

		/*
		 * chain previous sglist, if any. we know the previous
		 * sglist must be the biggest one, or we would not have
		 * ended up doing another loop.
		 */
		if (prev)
			sg_chain(prev, SCSI_MAX_SG_SEGMENTS, sgl);

		/*
		 * don't allow subsequent mempool allocs to sleep, it would
		 * violate the mempool principle.
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prev = sgl;
	} while (left);

	/*
	 * ->use_sg may get modified after dma mapping has potentially
	 * shrunk the number of segments, so keep a copy of it for free.
	 */
	cmd->__use_sg = cmd->use_sg;
	return ret;
enomem:
	if (ret) {
		/*
		 * Free entries chained off ret. Since we were trying to
		 * allocate another sglist, we know that all entries are of
		 * the max size.
		 */
		sgp = scsi_sg_pools + SG_MEMPOOL_NR - 1;
		prev = ret;
		ret = &ret[SCSI_MAX_SG_SEGMENTS - 1];

		while ((sgl = sg_chain_ptr(ret)) != NULL) {
			ret = &sgl[SCSI_MAX_SG_SEGMENTS - 1];
			mempool_free(sgl, sgp->pool);
		}

		mempool_free(prev, sgp->pool);
	}
	return NULL;
}

EXPORT_SYMBOL(scsi_alloc_sgtable);

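/*
 * Worked example (annotation, not part of the original file): for
 * cmd->use_sg = 200, the loop above first takes a 128-entry table from
 * sgpool-128 but uses only 127 data entries, keeping the last slot as a
 * chain entry; the remaining 200 - 127 = 73 entries come from a second
 * table (scsi_sgtable_index(73) -> index 4, sgpool-128 again) linked in
 * via sg_chain().
 */
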
void scsi_free_sgtable(struct scsi_cmnd *cmd)
{
	struct scatterlist *sgl = cmd->request_buffer;
	struct scsi_host_sg_pool *sgp;

	BUG_ON(cmd->sglist_len >= SG_MEMPOOL_NR);

	/*
	 * if this is the biggest size sglist, check if we have
	 * chained parts we need to free
	 */
	if (cmd->__use_sg > SCSI_MAX_SG_SEGMENTS) {
		unsigned short this, left;
		struct scatterlist *next;
		unsigned int index;

		left = cmd->__use_sg - (SCSI_MAX_SG_SEGMENTS - 1);
		next = sg_chain_ptr(&sgl[SCSI_MAX_SG_SEGMENTS - 1]);
		while (left && next) {
			sgl = next;
			this = left;
			if (this > SCSI_MAX_SG_SEGMENTS) {
				this = SCSI_MAX_SG_SEGMENTS - 1;
				index = SG_MEMPOOL_NR - 1;
			} else
				index = scsi_sgtable_index(this);

			left -= this;

			sgp = scsi_sg_pools + index;

			if (left)
				next = sg_chain_ptr(&sgl[sgp->size - 1]);

			mempool_free(sgl, sgp->pool);
		}

		/*
		 * Restore original, will be freed below
		 */
		sgl = cmd->request_buffer;
	}

	sgp = scsi_sg_pools + cmd->sglist_len;
	mempool_free(sgl, sgp->pool);
}

EXPORT_SYMBOL(scsi_free_sgtable);

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd	- command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *		command, we must release resources allocated during
 *		the __init_io() function.  Primarily this would involve
 *		the scatter-gather table, and potentially any bounce
 *		buffers.
 */
static void scsi_release_buffers(struct scsi_cmnd *cmd)
{
	if (cmd->use_sg)
		scsi_free_sgtable(cmd);

	/*
	 * Zero these out.  They now point to freed memory, and it is
	 * dangerous to hang onto the pointers.
	 */
	cmd->request_buffer = NULL;
	cmd->request_bufflen = 0;
}

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *		We must do one of several things here:
 *
 *		a) Call scsi_end_request.  This will finish off the
 *		   specified number of sectors.  If we are done, the
 *		   command block will be released, and the queue
 *		   function will be goosed.  If we are not done, then
 *		   scsi_end_request will directly goose the queue.
 *
 *		b) We can just use scsi_requeue_command() here.  This would
 *		   be used if we just wanted to retry, for example.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
	int result = cmd->result;
	int this_count = cmd->request_bufflen;
	struct request_queue *q = cmd->device->request_queue;
	struct request *req = cmd->request;
	int clear_errors = 1;
	struct scsi_sense_hdr sshdr;
	int sense_valid = 0;
	int sense_deferred = 0;

	scsi_release_buffers(cmd);

	if (result) {
		sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
		if (sense_valid)
			sense_deferred = scsi_sense_is_deferred(&sshdr);
	}

	if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
		req->errors = result;
		if (result) {
			clear_errors = 0;
			if (sense_valid && req->sense) {
				/*
				 * SG_IO wants current and deferred errors
				 */
				int len = 8 + cmd->sense_buffer[7];

				if (len > SCSI_SENSE_BUFFERSIZE)
					len = SCSI_SENSE_BUFFERSIZE;
				memcpy(req->sense, cmd->sense_buffer, len);
				req->sense_len = len;
			}
		}
		req->data_len = cmd->resid;
	}

	/*
	 * Next deal with any sectors which we were able to correctly
	 * handle.
	 */
	SCSI_LOG_HLCOMPLETE(1, printk("%ld sectors total, "
				      "%d bytes done.\n",
				      req->nr_sectors, good_bytes));
	SCSI_LOG_HLCOMPLETE(1, printk("use_sg is %d\n", cmd->use_sg));

	if (clear_errors)
		req->errors = 0;

	/* A number of bytes were successfully read.  If there
	 * are leftovers and there is some kind of error
	 * (result != 0), retry the rest.
	 */
	if (scsi_end_request(cmd, 1, good_bytes, result == 0) == NULL)
		return;

	/* good_bytes = 0, or (inclusive) there were leftovers and
	 * result = 0, so scsi_end_request couldn't retry.
	 */
	if (sense_valid && !sense_deferred) {
		switch (sshdr.sense_key) {
		case UNIT_ATTENTION:
			if (cmd->device->removable) {
				/* Detected disc change.  Set a bit
				 * and quietly refuse further access.
				 */
				cmd->device->changed = 1;
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			} else {
				/* Must have been a power glitch, or a
				 * bus reset.  Could not have been a
				 * media change, so we just retry the
				 * request and see what happens.
				 */
				scsi_requeue_command(q, cmd);
				return;
			}
			break;
		case ILLEGAL_REQUEST:
			/* If we had an ILLEGAL REQUEST returned, then
			 * we may have performed an unsupported
			 * command.  The only thing this should be
			 * would be a ten byte read where only a six
			 * byte read was supported.  Also, on a system
			 * where READ CAPACITY failed, we may have
			 * read past the end of the disk.
			 */
			if ((cmd->device->use_10_for_rw &&
			    sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
			    (cmd->cmnd[0] == READ_10 ||
			     cmd->cmnd[0] == WRITE_10)) {
				cmd->device->use_10_for_rw = 0;
				/* This will cause a retry with a
				 * 6-byte command.
				 */
				scsi_requeue_command(q, cmd);
				return;
			} else {
				scsi_end_request(cmd, 0, this_count, 1);
				return;
			}
			break;
		case NOT_READY:
			/* If the device is in the process of becoming
			 * ready, or has a temporary blockage, retry.
			 */
			if (sshdr.asc == 0x04) {
				switch (sshdr.ascq) {
				case 0x01: /* becoming ready */
				case 0x04: /* format in progress */
				case 0x05: /* rebuild in progress */
				case 0x06: /* recalculation in progress */
				case 0x07: /* operation in progress */
				case 0x08: /* Long write in progress */
				case 0x09: /* self test in progress */
					scsi_requeue_command(q, cmd);
					return;
				default:
					break;
				}
			}
			if (!(req->cmd_flags & REQ_QUIET))
				scsi_cmd_print_sense_hdr(cmd,
							 "Device not ready",
							 &sshdr);

			scsi_end_request(cmd, 0, this_count, 1);
			return;
		case VOLUME_OVERFLOW:
			if (!(req->cmd_flags & REQ_QUIET)) {
				scmd_printk(KERN_INFO, cmd,
					    "Volume overflow, CDB: ");
				__scsi_print_command(cmd->cmnd);
				scsi_print_sense("", cmd);
			}
			/* See SSC3rXX or current. */
			scsi_end_request(cmd, 0, this_count, 1);
			return;
		default:
			break;
		}
	}
	if (host_byte(result) == DID_RESET) {
		/* Third party bus reset or reset for error recovery
		 * reasons.  Just retry the request and see what
		 * happens.
		 */
		scsi_requeue_command(q, cmd);
		return;
	}
	if (result) {
		if (!(req->cmd_flags & REQ_QUIET)) {
			scsi_print_result(cmd);
			if (driver_byte(result) & DRIVER_SENSE)
				scsi_print_sense("", cmd);
		}
	}
	scsi_end_request(cmd, 0, this_count, !result);
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *		BLKPREP_DEFER if the failure is retryable
 *		BLKPREP_KILL if the failure is fatal
 */
static int scsi_init_io(struct scsi_cmnd *cmd)
{
	struct request *req = cmd->request;
	int count;

	/*
	 * We used to not use scatter-gather for single segment request,
	 * but now we do (it makes highmem I/O easier to support without
	 * kmapping pages)
	 */
	cmd->use_sg = req->nr_phys_segments;

	/*
	 * If sg table allocation fails, requeue request later.
	 */
	cmd->request_buffer = scsi_alloc_sgtable(cmd, GFP_ATOMIC);
	if (unlikely(!cmd->request_buffer)) {
		scsi_unprep_request(req);
		return BLKPREP_DEFER;
	}

	req->buffer = NULL;
	if (blk_pc_request(req))
		cmd->request_bufflen = req->data_len;
	else
		cmd->request_bufflen = req->nr_sectors << 9;

	/*
	 * Next, walk the list, and fill in the addresses and sizes of
	 * each segment.
	 */
	count = blk_rq_map_sg(req->q, req, cmd->request_buffer);
	if (likely(count <= cmd->use_sg)) {
		cmd->use_sg = count;
		return BLKPREP_OK;
	}

	printk(KERN_ERR "Incorrect number of segments after building list\n");
	printk(KERN_ERR "counted %d, received %d\n", count, cmd->use_sg);
	printk(KERN_ERR "req nr_sec %lu, cur_nr_sec %u\n", req->nr_sectors,
			req->current_nr_sectors);

	return BLKPREP_KILL;
}

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
		struct request *req)
{
	struct scsi_cmnd *cmd;

	if (!req->special) {
		cmd = scsi_get_command(sdev, GFP_ATOMIC);
		if (unlikely(!cmd))
			return NULL;
		req->special = cmd;
	} else {
		cmd = req->special;
	}

	/* pull a tag out of the request if we have one */
	cmd->tag = req->tag;
	cmd->request = req;

	return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	/*
	 * BLOCK_PC requests may transfer data, in which case they must
	 * have a bio attached to them.  Or they might contain a SCSI command
	 * that does not transfer data, in which case they may optionally
	 * submit a request without an attached bio.
	 */
	if (req->bio) {
		int ret;

		BUG_ON(!req->nr_phys_segments);

		ret = scsi_init_io(cmd);
		if (unlikely(ret))
			return ret;
	} else {
		BUG_ON(req->data_len);
		BUG_ON(req->data);

		cmd->request_bufflen = 0;
		cmd->request_buffer = NULL;
		cmd->use_sg = 0;
		req->buffer = NULL;
	}

	BUILD_BUG_ON(sizeof(req->cmd) > sizeof(cmd->cmnd));
	memcpy(cmd->cmnd, req->cmd, sizeof(cmd->cmnd));
	cmd->cmd_len = req->cmd_len;
	if (!req->data_len)
		cmd->sc_data_direction = DMA_NONE;
	else if (rq_data_dir(req) == WRITE)
		cmd->sc_data_direction = DMA_TO_DEVICE;
	else
		cmd->sc_data_direction = DMA_FROM_DEVICE;

	cmd->transfersize = req->data_len;
	cmd->allowed = req->retries;
	cmd->timeout_per_command = req->timeout;
	return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
	struct scsi_cmnd *cmd;
	int ret = scsi_prep_state_check(sdev, req);

	if (ret != BLKPREP_OK)
		return ret;
	/*
	 * Filesystem requests must transfer data.
	 */
	BUG_ON(!req->nr_phys_segments);

	cmd = scsi_get_cmd_from_req(sdev, req);
	if (unlikely(!cmd))
		return BLKPREP_DEFER;

	return scsi_init_io(cmd);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

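/*
 * Illustrative sketch (annotation, not part of the original file): an
 * upper-level driver's prep_fn typically dispatches on the request type
 * and funnels every outcome through scsi_prep_return(), much as
 * scsi_prep_fn() below does for the BLOCK_PC case.
 * mydisk_build_rw_cdb() is an invented helper that would fill cmd->cmnd.
 *
 *	static int mydisk_prep_fn(struct request_queue *q, struct request *rq)
 *	{
 *		struct scsi_device *sdev = q->queuedata;
 *		int ret;
 *
 *		if (rq->cmd_type == REQ_TYPE_BLOCK_PC)
 *			ret = scsi_setup_blk_pc_cmnd(sdev, rq);
 *		else if (rq->cmd_type == REQ_TYPE_FS) {
 *			ret = scsi_setup_fs_cmnd(sdev, rq);
 *			if (ret == BLKPREP_OK)
 *				ret = mydisk_build_rw_cdb(rq->special, rq);
 *		} else
 *			ret = BLKPREP_KILL;
 *
 *		return scsi_prep_return(q, rq, ret);
 *	}
 */
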
int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
	int ret = BLKPREP_OK;

	/*
	 * If the device is not in running state we will reject some
	 * or all commands.
	 */
	if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
		switch (sdev->sdev_state) {
		case SDEV_OFFLINE:
			/*
			 * If the device is offline we refuse to process any
			 * commands.  The device must be brought online
			 * before trying any recovery commands.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to offline device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_DEL:
			/*
			 * If the device is fully deleted, we refuse to
			 * process any commands as well.
			 */
			sdev_printk(KERN_ERR, sdev,
				    "rejecting I/O to dead device\n");
			ret = BLKPREP_KILL;
			break;
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			/*
			 * If the device is blocked we defer normal commands.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_DEFER;
			break;
		default:
			/*
			 * For any other not fully online state we only allow
			 * special commands.  In particular any user initiated
			 * command is not allowed.
			 */
			if (!(req->cmd_flags & REQ_PREEMPT))
				ret = BLKPREP_KILL;
			break;
		}
	}
	return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
	struct scsi_device *sdev = q->queuedata;

	switch (ret) {
	case BLKPREP_KILL:
		req->errors = DID_NO_CONNECT << 16;
		/* release the command and kill it */
		if (req->special) {
			struct scsi_cmnd *cmd = req->special;
			scsi_release_buffers(cmd);
			scsi_put_command(cmd);
			req->special = NULL;
		}
		break;
	case BLKPREP_DEFER:
		/*
		 * If we defer, the elv_next_request() returns NULL, but the
		 * queue must be restarted, so we plug here if no returning
		 * command will automatically do that.
		 */
		if (sdev->device_busy == 0)
			blk_plug_device(q);
		break;
	default:
		req->cmd_flags |= REQ_DONTPREP;
	}

	return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

static int scsi_prep_fn(struct request_queue *q, struct request *req)
{
	struct scsi_device *sdev = q->queuedata;
	int ret = BLKPREP_KILL;

	if (req->cmd_type == REQ_TYPE_BLOCK_PC)
		ret = scsi_setup_blk_pc_cmnd(sdev, req);
	return scsi_prep_return(q, req, ret);
}

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
				  struct scsi_device *sdev)
{
	if (sdev->device_busy >= sdev->queue_depth)
		return 0;
	if (sdev->device_busy == 0 && sdev->device_blocked) {
		/*
		 * unblock after device_blocked iterates to zero
		 */
		if (--sdev->device_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				   sdev_printk(KERN_INFO, sdev,
				   "unblocking device at zero depth\n"));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if (sdev->device_blocked)
		return 0;

	return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
				   struct Scsi_Host *shost,
				   struct scsi_device *sdev)
{
	if (scsi_host_in_recovery(shost))
		return 0;
	if (shost->host_busy == 0 && shost->host_blocked) {
		/*
		 * unblock after host_blocked iterates to zero
		 */
		if (--shost->host_blocked == 0) {
			SCSI_LOG_MLQUEUE(3,
				printk("scsi%d unblocking host at zero depth\n",
					shost->host_no));
		} else {
			blk_plug_device(q);
			return 0;
		}
	}
	if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
	    shost->host_blocked || shost->host_self_blocked) {
		if (list_empty(&sdev->starved_entry))
			list_add_tail(&sdev->starved_entry,
				      &shost->starved_list);
		return 0;
	}

	/* We're OK to process the command, so we can't be starved */
	if (!list_empty(&sdev->starved_entry))
		list_del_init(&sdev->starved_entry);

	return 1;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
	struct scsi_cmnd *cmd = req->special;
	struct scsi_device *sdev = cmd->device;
	struct Scsi_Host *shost = sdev->host;

	blkdev_dequeue_request(req);

	if (unlikely(cmd == NULL)) {
		printk(KERN_CRIT "impossible request in %s.\n",
		       __FUNCTION__);
		BUG();
	}

	scsi_init_cmd_errh(cmd);
	cmd->result = DID_NO_CONNECT << 16;
	atomic_inc(&cmd->device->iorequest_cnt);

	/*
	 * SCSI request completion path will do scsi_device_unbusy(),
	 * bump busy counts.  To bump the counters, we need to dance
	 * with the locks as normal issue path does.
	 */
	sdev->device_busy++;
	spin_unlock(sdev->request_queue->queue_lock);
	spin_lock(shost->host_lock);
	shost->host_busy++;
	spin_unlock(shost->host_lock);
	spin_lock(sdev->request_queue->queue_lock);

	__scsi_done(cmd);
}

static void scsi_softirq_done(struct request *rq)
{
	struct scsi_cmnd *cmd = rq->completion_data;
	unsigned long wait_for = (cmd->allowed + 1) * cmd->timeout_per_command;
	int disposition;

	INIT_LIST_HEAD(&cmd->eh_entry);

	disposition = scsi_decide_disposition(cmd);
	if (disposition != SUCCESS &&
	    time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
		sdev_printk(KERN_ERR, cmd->device,
			    "timing out command, waited %lus\n",
			    wait_for/HZ);
		disposition = SUCCESS;
	}

	scsi_log_completion(cmd, disposition);

	switch (disposition) {
	case SUCCESS:
		scsi_finish_command(cmd);
		break;
	case NEEDS_RETRY:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
		break;
	case ADD_TO_MLQUEUE:
		scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
		break;
	default:
		if (!scsi_eh_scmd_add(cmd, 0))
			scsi_finish_command(cmd);
	}
}

1da177e4 LT |
1470 | /* |
1471 | * Function: scsi_request_fn() | |
1472 | * | |
1473 | * Purpose: Main strategy routine for SCSI. | |
1474 | * | |
1475 | * Arguments: q - Pointer to actual queue. | |
1476 | * | |
1477 | * Returns: Nothing | |
1478 | * | |
1479 | * Lock status: IO request lock assumed to be held when called. | |
1480 | */ | |
1481 | static void scsi_request_fn(struct request_queue *q) | |
1482 | { | |
1483 | struct scsi_device *sdev = q->queuedata; | |
1484 | struct Scsi_Host *shost; | |
1485 | struct scsi_cmnd *cmd; | |
1486 | struct request *req; | |
1487 | ||
1488 | if (!sdev) { | |
1489 | printk("scsi: killing requests for dead queue\n"); | |
e91442b6 JB |
1490 | while ((req = elv_next_request(q)) != NULL) |
1491 | scsi_kill_request(req, q); | |
1da177e4 LT |
1492 | return; |
1493 | } | |
1494 | ||
1495 | if(!get_device(&sdev->sdev_gendev)) | |
1496 | /* We must be tearing the block queue down already */ | |
1497 | return; | |
1498 | ||
1499 | /* | |
1500 | * To start with, we keep looping until the queue is empty, or until | |
1501 | * the host is no longer able to accept any more requests. | |
1502 | */ | |
1503 | shost = sdev->host; | |
1504 | while (!blk_queue_plugged(q)) { | |
1505 | int rtn; | |
1506 | /* | |
1507 | * get next queueable request. We do this early to make sure | |
1508 | * that the request is fully prepared even if we cannot | |
1509 | * accept it. | |
1510 | */ | |
1511 | req = elv_next_request(q); | |
1512 | if (!req || !scsi_dev_queue_ready(q, sdev)) | |
1513 | break; | |
1514 | ||
1515 | if (unlikely(!scsi_device_online(sdev))) { | |
9ccfc756 JB |
1516 | sdev_printk(KERN_ERR, sdev, |
1517 | "rejecting I/O to offline device\n"); | |
e91442b6 | 1518 | scsi_kill_request(req, q); |
1da177e4 LT |
1519 | continue; |
1520 | } | |
1521 | ||
1522 | ||
1523 | /* | |
1524 | * Remove the request from the request list. | |
1525 | */ | |
1526 | if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req))) | |
1527 | blkdev_dequeue_request(req); | |
1528 | sdev->device_busy++; | |
1529 | ||
1530 | spin_unlock(q->queue_lock); | |
e91442b6 JB |
1531 | cmd = req->special; |
1532 | if (unlikely(cmd == NULL)) { | |
1533 | printk(KERN_CRIT "impossible request in %s.\n" | |
1534 | "please mail a stack trace to " | |
4aff5e23 | 1535 | "linux-scsi@vger.kernel.org\n", |
e91442b6 | 1536 | __FUNCTION__); |
4aff5e23 | 1537 | blk_dump_rq_flags(req, "foo"); |
e91442b6 JB |
1538 | BUG(); |
1539 | } | |
1da177e4 LT |
1540 | spin_lock(shost->host_lock); |
1541 | ||
1542 | if (!scsi_host_queue_ready(q, shost, sdev)) | |
1543 | goto not_ready; | |
1544 | if (sdev->single_lun) { | |
1545 | if (scsi_target(sdev)->starget_sdev_user && | |
1546 | scsi_target(sdev)->starget_sdev_user != sdev) | |
1547 | goto not_ready; | |
1548 | scsi_target(sdev)->starget_sdev_user = sdev; | |
1549 | } | |
1550 | shost->host_busy++; | |
1551 | ||
1552 | /* | |
1553 | * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will | |
1554 | * take the lock again. | |
1555 | */ | |
1556 | spin_unlock_irq(shost->host_lock); | |
1557 | ||
1da177e4 LT |
1558 | /* |
1559 | * Finally, initialize any error handling parameters, and set up | |
1560 | * the timers for timeouts. | |
1561 | */ | |
1562 | scsi_init_cmd_errh(cmd); | |
1563 | ||
1564 | /* | |
1565 | * Dispatch the command to the low-level driver. | |
1566 | */ | |
1567 | rtn = scsi_dispatch_cmd(cmd); | |
1568 | spin_lock_irq(q->queue_lock); | |
1569 | if (rtn) { | 
1570 | /* we're refusing the command; because of | |
1571 | * the way locks get dropped, we need to | |
1572 | * check here if plugging is required */ | |
1573 | if (sdev->device_busy == 0) | 
1574 | blk_plug_device(q); | |
1575 | ||
1576 | break; | |
1577 | } | |
1578 | } | |
1579 | ||
1580 | goto out; | |
1581 | ||
1582 | not_ready: | |
1583 | spin_unlock_irq(shost->host_lock); | |
1584 | ||
1585 | /* | |
1586 | * lock q, handle tag, requeue req, and decrement device_busy. We | |
1587 | * must return with queue_lock held. | |
1588 | * | |
1589 | * Decrementing device_busy without checking it is OK, as all such | |
1590 | * cases (host limits or settings) should run the queue at some | |
1591 | * later time. | |
1592 | */ | |
1593 | spin_lock_irq(q->queue_lock); | |
1594 | blk_requeue_request(q, req); | |
1595 | sdev->device_busy--; | |
1596 | if (sdev->device_busy == 0) | 
1597 | blk_plug_device(q); | |
1598 | out: | |
1599 | /* must be careful here...if we trigger the ->remove() function | |
1600 | * we cannot be holding the q lock */ | |
1601 | spin_unlock_irq(q->queue_lock); | |
1602 | put_device(&sdev->sdev_gendev); | |
1603 | spin_lock_irq(q->queue_lock); | |
1604 | } | |
1605 | ||
1606 | u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost) | |
1607 | { | |
1608 | struct device *host_dev; | |
1609 | u64 bounce_limit = 0xffffffff; | |
1610 | ||
1611 | if (shost->unchecked_isa_dma) | |
1612 | return BLK_BOUNCE_ISA; | |
1613 | /* | |
1614 | * Platforms with virtual-DMA translation | |
1615 | * hardware have no practical limit. | |
1616 | */ | |
1617 | if (!PCI_DMA_BUS_IS_PHYS) | |
1618 | return BLK_BOUNCE_ANY; | |
1619 | ||
1620 | host_dev = scsi_get_device(shost); | |
1621 | if (host_dev && host_dev->dma_mask) | |
1622 | bounce_limit = *host_dev->dma_mask; | |
1623 | ||
1624 | return bounce_limit; | |
1625 | } | |
1626 | EXPORT_SYMBOL(scsi_calculate_bounce_limit); | |
1627 | ||
b58d9154 FT |
1628 | struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost, |
1629 | request_fn_proc *request_fn) | |
1da177e4 | 1630 | { |
1da177e4 LT |
1631 | struct request_queue *q; |
1632 | ||
b58d9154 | 1633 | q = blk_init_queue(request_fn, NULL); |
1da177e4 LT |
1634 | if (!q) |
1635 | return NULL; | |
1636 | ||
a8474ce2 JA |
1637 | /* |
1638 | * this limit is imposed by hardware restrictions | |
1639 | */ | |
1da177e4 | 1640 | blk_queue_max_hw_segments(q, shost->sg_tablesize); |
a8474ce2 JA |
1641 | |
1642 | /* | |
1643 | * In the future, sg chaining support will be mandatory and this | |
1644 | * ifdef can then go away. Right now we don't have all archs | |
1645 | * converted, so better keep it safe. | |
1646 | */ | |
1647 | #ifdef ARCH_HAS_SG_CHAIN | |
9cb83c75 FT |
1648 | if (shost->use_sg_chaining) |
1649 | blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS); | |
1650 | else | |
1651 | blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS); | |
a8474ce2 JA |
1652 | #else |
1653 | blk_queue_max_phys_segments(q, SCSI_MAX_SG_SEGMENTS); | |
1654 | #endif | |
1655 | ||
1da177e4 LT |
1656 | blk_queue_max_sectors(q, shost->max_sectors); |
1657 | blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost)); | |
1658 | blk_queue_segment_boundary(q, shost->dma_boundary); | |
1da177e4 | 1659 | |
1da177e4 LT |
1660 | if (!shost->use_clustering) |
1661 | clear_bit(QUEUE_FLAG_CLUSTER, &q->queue_flags); | |
1662 | return q; | |
1663 | } | |
b58d9154 FT |
1664 | EXPORT_SYMBOL(__scsi_alloc_queue); |
1665 | ||
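As a hedged usage sketch: a transport class or out-of-tree LLD that wants a second queue carrying the host's hardware limits could call the helper with its own strategy routine. my_smp_request_fn and my_alloc_smp_queue are hypothetical names:

	#include <scsi/scsi_host.h>

	static void my_smp_request_fn(struct request_queue *q);	/* hypothetical */

	static struct request_queue *my_alloc_smp_queue(struct Scsi_Host *shost)
	{
		/* __scsi_alloc_queue() has already applied sg_tablesize,
		 * max_sectors, the bounce limit and dma_boundary of @shost */
		return __scsi_alloc_queue(shost, my_smp_request_fn);
	}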
1666 | struct request_queue *scsi_alloc_queue(struct scsi_device *sdev) | |
1667 | { | |
1668 | struct request_queue *q; | |
1669 | ||
1670 | q = __scsi_alloc_queue(sdev->host, scsi_request_fn); | |
1671 | if (!q) | |
1672 | return NULL; | |
1673 | ||
1674 | blk_queue_prep_rq(q, scsi_prep_fn); | |
b58d9154 FT |
1675 | blk_queue_softirq_done(q, scsi_softirq_done); |
1676 | return q; | |
1677 | } | |
1da177e4 LT |
1678 | |
1679 | void scsi_free_queue(struct request_queue *q) | |
1680 | { | |
1681 | blk_cleanup_queue(q); | |
1682 | } | |
1683 | ||
1684 | /* | |
1685 | * Function: scsi_block_requests() | |
1686 | * | |
1687 | * Purpose: Utility function used by low-level drivers to prevent further | |
1688 | * commands from being queued to the device. | |
1689 | * | |
1690 | * Arguments: shost - Host in question | |
1691 | * | |
1692 | * Returns: Nothing | |
1693 | * | |
1694 | * Lock status: No locks are assumed held. | |
1695 | * | |
1696 | * Notes: There is no timer nor any other means by which the requests | |
1697 | * get unblocked other than the low-level driver calling | |
1698 | * scsi_unblock_requests(). | |
1699 | */ | |
1700 | void scsi_block_requests(struct Scsi_Host *shost) | |
1701 | { | |
1702 | shost->host_self_blocked = 1; | |
1703 | } | |
1704 | EXPORT_SYMBOL(scsi_block_requests); | |
1705 | ||
1706 | /* | |
1707 | * Function: scsi_unblock_requests() | |
1708 | * | |
1709 | * Purpose: Utility function used by low-level drivers to allow further | |
1710 | * commands to be queued to the device. | 
1711 | * | |
1712 | * Arguments: shost - Host in question | |
1713 | * | |
1714 | * Returns: Nothing | |
1715 | * | |
1716 | * Lock status: No locks are assumed held. | |
1717 | * | |
1718 | * Notes: There is no timer nor any other means by which the requests | |
1719 | * get unblocked other than the low-level driver calling | |
1720 | * scsi_unblock_requests(). | |
1721 | * | |
1722 | * This is done as an API function so that changes to the | |
1723 | * internals of the scsi mid-layer won't require wholesale | |
1724 | * changes to drivers that use this feature. | |
1725 | */ | |
1726 | void scsi_unblock_requests(struct Scsi_Host *shost) | |
1727 | { | |
1728 | shost->host_self_blocked = 0; | |
1729 | scsi_run_host_queues(shost); | |
1730 | } | |
1731 | EXPORT_SYMBOL(scsi_unblock_requests); | |
1732 | ||
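A minimal sketch of the intended pairing, assuming a hypothetical my_download_firmware() helper: the LLD fences the whole host, does its maintenance, then unblocks, which also reruns the per-device queues:

	static int my_download_firmware(struct Scsi_Host *shost);	/* hypothetical */

	static int my_reload_firmware(struct Scsi_Host *shost)
	{
		int err;

		scsi_block_requests(shost);		/* no new commands are queued */
		err = my_download_firmware(shost);
		scsi_unblock_requests(shost);		/* restart the host's queues */
		return err;
	}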
1733 | int __init scsi_init_queue(void) | |
1734 | { | |
1735 | int i; | |
1736 | ||
aa7b5cd7 MC |
1737 | scsi_io_context_cache = kmem_cache_create("scsi_io_context", |
1738 | sizeof(struct scsi_io_context), | |
20c2df83 | 1739 | 0, 0, NULL); |
aa7b5cd7 MC |
1740 | if (!scsi_io_context_cache) { |
1741 | printk(KERN_ERR "SCSI: can't init scsi io context cache\n"); | |
1742 | return -ENOMEM; | |
1743 | } | |
1744 | ||
1da177e4 LT |
1745 | for (i = 0; i < SG_MEMPOOL_NR; i++) { |
1746 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | |
1747 | int size = sgp->size * sizeof(struct scatterlist); | |
1748 | ||
1749 | sgp->slab = kmem_cache_create(sgp->name, size, 0, | |
20c2df83 | 1750 | SLAB_HWCACHE_ALIGN, NULL); |
1da177e4 LT |
1751 | if (!sgp->slab) { |
1752 | printk(KERN_ERR "SCSI: can't init sg slab %s\n", | |
1753 | sgp->name); | |
1754 | } | |
1755 | ||
93d2341c MD |
1756 | sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE, |
1757 | sgp->slab); | |
1da177e4 LT |
1758 | if (!sgp->pool) { |
1759 | printk(KERN_ERR "SCSI: can't init sg mempool %s\n", | |
1760 | sgp->name); | |
1761 | } | |
1762 | } | |
1763 | ||
1764 | return 0; | |
1765 | } | |
1766 | ||
1767 | void scsi_exit_queue(void) | |
1768 | { | |
1769 | int i; | |
1770 | ||
aa7b5cd7 MC |
1771 | kmem_cache_destroy(scsi_io_context_cache); |
1772 | ||
1da177e4 LT |
1773 | for (i = 0; i < SG_MEMPOOL_NR; i++) { |
1774 | struct scsi_host_sg_pool *sgp = scsi_sg_pools + i; | |
1775 | mempool_destroy(sgp->pool); | |
1776 | kmem_cache_destroy(sgp->slab); | |
1777 | } | |
1778 | } | |
5baba830 JB |
1779 | |
1780 | /** | |
1781 | * scsi_mode_select - issue a mode select | |
1782 | * @sdev: SCSI device to be queried | |
1783 | * @pf: Page format bit (1 == standard, 0 == vendor specific) | |
1784 | * @sp: Save page bit (0 == don't save, 1 == save) | |
1785 | * @modepage: mode page being requested | |
1786 | * @buffer: request buffer (may not be smaller than eight bytes) | |
1787 | * @len: length of request buffer. | |
1788 | * @timeout: command timeout | |
1789 | * @retries: number of retries before failing | |
1790 | * @data: returns a structure abstracting the mode header data | |
1791 | * @sshdr: place to put sense data (or NULL if no sense to be collected). | 
1792 | * must be SCSI_SENSE_BUFFERSIZE big. | |
1793 | * | |
1794 | * Returns zero if successful; negative error number or scsi | |
1795 | * status on error | |
1796 | * | |
1797 | */ | |
1798 | int | |
1799 | scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage, | |
1800 | unsigned char *buffer, int len, int timeout, int retries, | |
1801 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) | |
1802 | { | |
1803 | unsigned char cmd[10]; | |
1804 | unsigned char *real_buffer; | |
1805 | int ret; | |
1806 | ||
1807 | memset(cmd, 0, sizeof(cmd)); | |
1808 | cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); | |
1809 | ||
1810 | if (sdev->use_10_for_ms) { | |
1811 | if (len > 65535) | |
1812 | return -EINVAL; | |
1813 | real_buffer = kmalloc(8 + len, GFP_KERNEL); | |
1814 | if (!real_buffer) | |
1815 | return -ENOMEM; | |
1816 | memcpy(real_buffer + 8, buffer, len); | |
1817 | len += 8; | |
1818 | real_buffer[0] = 0; | |
1819 | real_buffer[1] = 0; | |
1820 | real_buffer[2] = data->medium_type; | |
1821 | real_buffer[3] = data->device_specific; | |
1822 | real_buffer[4] = data->longlba ? 0x01 : 0; | |
1823 | real_buffer[5] = 0; | |
1824 | real_buffer[6] = data->block_descriptor_length >> 8; | |
1825 | real_buffer[7] = data->block_descriptor_length; | |
1826 | ||
1827 | cmd[0] = MODE_SELECT_10; | |
1828 | cmd[7] = len >> 8; | |
1829 | cmd[8] = len; | |
1830 | } else { | |
1831 | if (len > 255 || data->block_descriptor_length > 255 || | |
1832 | data->longlba) | |
1833 | return -EINVAL; | |
1834 | ||
1835 | real_buffer = kmalloc(4 + len, GFP_KERNEL); | |
1836 | if (!real_buffer) | |
1837 | return -ENOMEM; | |
1838 | memcpy(real_buffer + 4, buffer, len); | |
1839 | len += 4; | |
1840 | real_buffer[0] = 0; | |
1841 | real_buffer[1] = data->medium_type; | |
1842 | real_buffer[2] = data->device_specific; | |
1843 | real_buffer[3] = data->block_descriptor_length; | |
1844 | ||
1845 | ||
1846 | cmd[0] = MODE_SELECT; | |
1847 | cmd[4] = len; | |
1848 | } | |
1849 | ||
1850 | ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len, | |
1851 | sshdr, timeout, retries); | |
1852 | kfree(real_buffer); | |
1853 | return ret; | |
1854 | } | |
1855 | EXPORT_SYMBOL_GPL(scsi_mode_select); | |
1856 | ||
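As a hedged example of the calling convention (the buffer carries the page without a header; the header is rebuilt from @data): a sketch that toggles the WCE bit of the caching page, modeled on how disk drivers adjust caching. scsi_mode_sense() is documented just below; my_set_wce, the 64-byte buffer, the 5-second timeout and 3 retries are all illustrative:

	#include <scsi/scsi.h>
	#include <scsi/scsi_device.h>
	#include <scsi/scsi_eh.h>

	static int my_set_wce(struct scsi_device *sdev, int enable)
	{
		unsigned char buffer[64];	/* big enough for the caching page */
		struct scsi_sense_hdr sshdr;
		struct scsi_mode_data data;
		unsigned char *page;
		int len, res;

		/* fetch the current page; DBD set, so no block descriptors */
		res = scsi_mode_sense(sdev, 0x08, 0x08, buffer, sizeof(buffer),
				      5 * HZ, 3, &data, &sshdr);
		if (!scsi_status_is_good(res))
			return -EIO;

		page = buffer + data.header_length + data.block_descriptor_length;
		len = data.length - data.header_length -
		      data.block_descriptor_length;
		if (len > (int)(sizeof(buffer) - (page - buffer)))
			len = sizeof(buffer) - (page - buffer);

		page[0] &= ~0x80;		/* PS is reserved in MODE SELECT */
		if (enable)
			page[2] |= 0x04;	/* WCE */
		else
			page[2] &= ~0x04;
		data.device_specific = 0;	/* reserved in MODE SELECT, too */

		res = scsi_mode_select(sdev, 1 /* pf */, 0 /* sp */, 0x08,
				       page, len, 5 * HZ, 3, &data, &sshdr);
		return scsi_status_is_good(res) ? 0 : -EIO;
	}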
1da177e4 | 1857 | /** |
ea73a9f2 | 1858 | * scsi_mode_sense - issue a mode sense, falling back from ten to | 
1da177e4 | 1859 | * six bytes if necessary. |
1cf72699 | 1860 | * @sdev: SCSI device to be queried |
1da177e4 LT |
1861 | * @dbd: set if mode sense will allow block descriptors to be returned |
1862 | * @modepage: mode page being requested | |
1863 | * @buffer: request buffer (may not be smaller than eight bytes) | |
1864 | * @len: length of request buffer. | |
1865 | * @timeout: command timeout | |
1866 | * @retries: number of retries before failing | |
1867 | * @data: returns a structure abstracting the mode header data | |
1cf72699 JB |
1868 | * @sshdr: place to put sense data (or NULL if no sense to be collected). | 
1869 | * must be SCSI_SENSE_BUFFERSIZE big. | |
1da177e4 LT |
1870 | * |
1871 | * Returns the SCSI result from the underlying scsi_execute_req() | 
1872 | * call (zero on success); on success @data->header_length holds the | 
1873 | * header offset (4 or 8, for a six or ten byte command). | 
1874 | **/ | |
1875 | int | |
1cf72699 | 1876 | scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, |
1da177e4 | 1877 | unsigned char *buffer, int len, int timeout, int retries, |
5baba830 JB |
1878 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) |
1879 | { | |
1da177e4 LT |
1880 | unsigned char cmd[12]; |
1881 | int use_10_for_ms; | |
1882 | int header_length; | |
1cf72699 | 1883 | int result; |
ea73a9f2 | 1884 | struct scsi_sense_hdr my_sshdr; |
1da177e4 LT |
1885 | |
1886 | memset(data, 0, sizeof(*data)); | |
1887 | memset(&cmd[0], 0, 12); | |
1888 | cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ | |
1889 | cmd[2] = modepage; | |
1890 | ||
ea73a9f2 JB |
1891 | /* caller might not be interested in sense, but we need it */ |
1892 | if (!sshdr) | |
1893 | sshdr = &my_sshdr; | |
1894 | ||
1da177e4 | 1895 | retry: |
1cf72699 | 1896 | use_10_for_ms = sdev->use_10_for_ms; |
1da177e4 LT |
1897 | |
1898 | if (use_10_for_ms) { | |
1899 | if (len < 8) | |
1900 | len = 8; | |
1901 | ||
1902 | cmd[0] = MODE_SENSE_10; | |
1903 | cmd[8] = len; | |
1904 | header_length = 8; | |
1905 | } else { | |
1906 | if (len < 4) | |
1907 | len = 4; | |
1908 | ||
1909 | cmd[0] = MODE_SENSE; | |
1910 | cmd[4] = len; | |
1911 | header_length = 4; | |
1912 | } | |
1913 | ||
1da177e4 LT |
1914 | memset(buffer, 0, len); |
1915 | ||
1cf72699 | 1916 | result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len, |
ea73a9f2 | 1917 | sshdr, timeout, retries); |
1da177e4 LT |
1918 | |
1919 | /* This code looks awful: what it's doing is making sure an | |
1920 | * ILLEGAL REQUEST sense return identifies the actual command | |
1921 | * byte as the problem. MODE_SENSE commands can return | |
1922 | * ILLEGAL REQUEST if the mode page isn't supported */ | 
1923 | ||
1cf72699 JB |
1924 | if (use_10_for_ms && !scsi_status_is_good(result) && |
1925 | (driver_byte(result) & DRIVER_SENSE)) { | |
ea73a9f2 JB |
1926 | if (scsi_sense_valid(sshdr)) { |
1927 | if ((sshdr->sense_key == ILLEGAL_REQUEST) && | |
1928 | (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { | |
1da177e4 LT |
1929 | /* |
1930 | * Invalid command operation code | |
1931 | */ | |
1cf72699 | 1932 | sdev->use_10_for_ms = 0; |
1da177e4 LT |
1933 | goto retry; |
1934 | } | |
1935 | } | |
1936 | } | |
1937 | ||
1cf72699 | 1938 | if (scsi_status_is_good(result)) { | 
6d73c851 AV |
1939 | if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && |
1940 | (modepage == 6 || modepage == 8))) { | |
1941 | /* Initio breakage? */ | |
1942 | header_length = 0; | |
1943 | data->length = 13; | |
1944 | data->medium_type = 0; | |
1945 | data->device_specific = 0; | |
1946 | data->longlba = 0; | |
1947 | data->block_descriptor_length = 0; | |
1948 | } else if (use_10_for_ms) { | 
1da177e4 LT |
1949 | data->length = buffer[0]*256 + buffer[1] + 2; |
1950 | data->medium_type = buffer[2]; | |
1951 | data->device_specific = buffer[3]; | |
1952 | data->longlba = buffer[4] & 0x01; | |
1953 | data->block_descriptor_length = buffer[6]*256 | |
1954 | + buffer[7]; | |
1955 | } else { | |
1956 | data->length = buffer[0] + 1; | |
1957 | data->medium_type = buffer[1]; | |
1958 | data->device_specific = buffer[2]; | |
1959 | data->block_descriptor_length = buffer[3]; | |
1960 | } | |
6d73c851 | 1961 | data->header_length = header_length; |
1da177e4 LT |
1962 | } |
1963 | ||
1cf72699 | 1964 | return result; |
1da177e4 LT |
1965 | } |
1966 | EXPORT_SYMBOL(scsi_mode_sense); | |
1967 | ||
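A hedged read-only sketch: the mode parameter header alone is often useful, e.g. the write-protect flag disks report in the device-specific byte. my_is_write_protected and the timeout/retry values are illustrative:

	#include <scsi/scsi.h>
	#include <scsi/scsi_device.h>

	static int my_is_write_protected(struct scsi_device *sdev)
	{
		unsigned char buffer[8];
		struct scsi_mode_data data;
		int res;

		/* page 0x3F with DBD set; a truncated transfer still returns
		 * the header, which is all we look at */
		res = scsi_mode_sense(sdev, 0x08, 0x3F, buffer, sizeof(buffer),
				      5 * HZ, 3, &data, NULL);
		if (!scsi_status_is_good(res))
			return -EIO;

		return (data.device_specific & 0x80) ? 1 : 0;
	}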
1968 | int | |
1969 | scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries) | |
1970 | { | |
1da177e4 LT |
1971 | char cmd[] = { |
1972 | TEST_UNIT_READY, 0, 0, 0, 0, 0, | |
1973 | }; | |
ea73a9f2 | 1974 | struct scsi_sense_hdr sshdr; |
1da177e4 LT |
1975 | int result; |
1976 | ||
ea73a9f2 | 1977 | result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, &sshdr, |
1cf72699 | 1978 | timeout, retries); |
1da177e4 | 1979 | |
1cf72699 | 1980 | if ((driver_byte(result) & DRIVER_SENSE) && sdev->removable) { |
1da177e4 | 1981 | |
ea73a9f2 | 1982 | if ((scsi_sense_valid(&sshdr)) && |
1da177e4 LT |
1983 | ((sshdr.sense_key == UNIT_ATTENTION) || |
1984 | (sshdr.sense_key == NOT_READY))) { | |
1985 | sdev->changed = 1; | |
1cf72699 | 1986 | result = 0; |
1da177e4 LT |
1987 | } |
1988 | } | |
1da177e4 LT |
1989 | return result; |
1990 | } | |
1991 | EXPORT_SYMBOL(scsi_test_unit_ready); | |
1992 | ||
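A hedged sketch of how removable-media drivers typically use this; note the function itself already folds UNIT ATTENTION/NOT READY sense into sdev->changed:

	static int my_media_present(struct scsi_device *sdev)
	{
		int res = scsi_test_unit_ready(sdev, 5 * HZ, 3);	/* illustrative values */

		if (sdev->removable && sdev->changed)
			return 0;		/* medium changed or gone */
		return scsi_status_is_good(res);
	}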
1993 | /** | |
1994 | * scsi_device_set_state - Take the given device through the device | |
1995 | * state model. | |
1996 | * @sdev: scsi device to change the state of. | |
1997 | * @state: state to change to. | |
1998 | * | |
1999 | * Returns zero if successful or an error if the requested | 
2000 | * transition is illegal. | |
2001 | **/ | |
2002 | int | |
2003 | scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) | |
2004 | { | |
2005 | enum scsi_device_state oldstate = sdev->sdev_state; | |
2006 | ||
2007 | if (state == oldstate) | |
2008 | return 0; | |
2009 | ||
2010 | switch (state) { | |
2011 | case SDEV_CREATED: | |
2012 | /* There are no legal states that come back to | |
2013 | * created. This is the manually initialised start | |
2014 | * state */ | |
2015 | goto illegal; | |
2016 | ||
2017 | case SDEV_RUNNING: | |
2018 | switch (oldstate) { | |
2019 | case SDEV_CREATED: | |
2020 | case SDEV_OFFLINE: | |
2021 | case SDEV_QUIESCE: | |
2022 | case SDEV_BLOCK: | |
2023 | break; | |
2024 | default: | |
2025 | goto illegal; | |
2026 | } | |
2027 | break; | |
2028 | ||
2029 | case SDEV_QUIESCE: | |
2030 | switch (oldstate) { | |
2031 | case SDEV_RUNNING: | |
2032 | case SDEV_OFFLINE: | |
2033 | break; | |
2034 | default: | |
2035 | goto illegal; | |
2036 | } | |
2037 | break; | |
2038 | ||
2039 | case SDEV_OFFLINE: | |
2040 | switch (oldstate) { | |
2041 | case SDEV_CREATED: | |
2042 | case SDEV_RUNNING: | |
2043 | case SDEV_QUIESCE: | |
2044 | case SDEV_BLOCK: | |
2045 | break; | |
2046 | default: | |
2047 | goto illegal; | |
2048 | } | |
2049 | break; | |
2050 | ||
2051 | case SDEV_BLOCK: | |
2052 | switch (oldstate) { | |
2053 | case SDEV_CREATED: | |
2054 | case SDEV_RUNNING: | |
2055 | break; | |
2056 | default: | |
2057 | goto illegal; | |
2058 | } | |
2059 | break; | |
2060 | ||
2061 | case SDEV_CANCEL: | |
2062 | switch (oldstate) { | |
2063 | case SDEV_CREATED: | |
2064 | case SDEV_RUNNING: | |
9ea72909 | 2065 | case SDEV_QUIESCE: |
1da177e4 LT |
2066 | case SDEV_OFFLINE: |
2067 | case SDEV_BLOCK: | |
2068 | break; | |
2069 | default: | |
2070 | goto illegal; | |
2071 | } | |
2072 | break; | |
2073 | ||
2074 | case SDEV_DEL: | |
2075 | switch (oldstate) { | |
309bd271 BK |
2076 | case SDEV_CREATED: |
2077 | case SDEV_RUNNING: | |
2078 | case SDEV_OFFLINE: | |
1da177e4 LT |
2079 | case SDEV_CANCEL: |
2080 | break; | |
2081 | default: | |
2082 | goto illegal; | |
2083 | } | |
2084 | break; | |
2085 | ||
2086 | } | |
2087 | sdev->sdev_state = state; | |
2088 | return 0; | |
2089 | ||
2090 | illegal: | |
2091 | SCSI_LOG_ERROR_RECOVERY(1, | |
9ccfc756 JB |
2092 | sdev_printk(KERN_ERR, sdev, |
2093 | "Illegal state transition %s->%s\n", | |
2094 | scsi_device_state_name(oldstate), | |
2095 | scsi_device_state_name(state)) | |
1da177e4 LT |
2096 | ); |
2097 | return -EINVAL; | |
2098 | } | |
2099 | EXPORT_SYMBOL(scsi_device_set_state); | |
2100 | ||
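As a sketch (locking elided; illustrative only), an LLD or transport that has decided a device is unusable can push it through the state model like this:

	static void my_offline_device(struct scsi_device *sdev)
	{
		if (scsi_device_set_state(sdev, SDEV_OFFLINE))
			sdev_printk(KERN_ERR, sdev,
				    "cannot transition to SDEV_OFFLINE\n");
	}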
2101 | /** | |
2102 | * scsi_device_quiesce - Block user issued commands. | |
2103 | * @sdev: scsi device to quiesce. | |
2104 | * | |
2105 | * This works by trying to transition to the SDEV_QUIESCE state | |
2106 | * (which must be a legal transition). When the device is in this | |
2107 | * state, only special requests will be accepted, all others will | |
2108 | * be deferred. Since special requests may also be requeued requests, | |
2109 | * a successful return doesn't guarantee the device will be | |
2110 | * totally quiescent. | |
2111 | * | |
2112 | * Must be called with user context, may sleep. | |
2113 | * | |
2114 | * Returns zero if successful or an error if not. | 
2115 | **/ | |
2116 | int | |
2117 | scsi_device_quiesce(struct scsi_device *sdev) | |
2118 | { | |
2119 | int err = scsi_device_set_state(sdev, SDEV_QUIESCE); | |
2120 | if (err) | |
2121 | return err; | |
2122 | ||
2123 | scsi_run_queue(sdev->request_queue); | |
2124 | while (sdev->device_busy) { | |
2125 | msleep_interruptible(200); | |
2126 | scsi_run_queue(sdev->request_queue); | |
2127 | } | |
2128 | return 0; | |
2129 | } | |
2130 | EXPORT_SYMBOL(scsi_device_quiesce); | |
2131 | ||
2132 | /** | |
2133 | * scsi_device_resume - Restart user issued commands to a quiesced device. | |
2134 | * @sdev: scsi device to resume. | |
2135 | * | |
2136 | * Moves the device from quiesced back to running and restarts the | |
2137 | * queues. | |
2138 | * | |
2139 | * Must be called with user context, may sleep. | |
2140 | **/ | |
2141 | void | |
2142 | scsi_device_resume(struct scsi_device *sdev) | |
2143 | { | |
2144 | if (scsi_device_set_state(sdev, SDEV_RUNNING)) | 
2145 | return; | |
2146 | scsi_run_queue(sdev->request_queue); | |
2147 | } | |
2148 | EXPORT_SYMBOL(scsi_device_resume); | |
2149 | ||
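A hedged sketch of the quiesce/resume bracket, in the style of domain-validation code: user I/O is held off while privately issued commands, which bypass the gate, run. my_send_private_commands() is hypothetical:

	static int my_send_private_commands(struct scsi_device *sdev);	/* hypothetical */

	static int my_probe_quietly(struct scsi_device *sdev)
	{
		int err = scsi_device_quiesce(sdev);
		if (err)
			return err;

		err = my_send_private_commands(sdev);

		scsi_device_resume(sdev);
		return err;
	}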
2150 | static void | |
2151 | device_quiesce_fn(struct scsi_device *sdev, void *data) | |
2152 | { | |
2153 | scsi_device_quiesce(sdev); | |
2154 | } | |
2155 | ||
2156 | void | |
2157 | scsi_target_quiesce(struct scsi_target *starget) | |
2158 | { | |
2159 | starget_for_each_device(starget, NULL, device_quiesce_fn); | |
2160 | } | |
2161 | EXPORT_SYMBOL(scsi_target_quiesce); | |
2162 | ||
2163 | static void | |
2164 | device_resume_fn(struct scsi_device *sdev, void *data) | |
2165 | { | |
2166 | scsi_device_resume(sdev); | |
2167 | } | |
2168 | ||
2169 | void | |
2170 | scsi_target_resume(struct scsi_target *starget) | |
2171 | { | |
2172 | starget_for_each_device(starget, NULL, device_resume_fn); | |
2173 | } | |
2174 | EXPORT_SYMBOL(scsi_target_resume); | |
2175 | ||
2176 | /** | |
2177 | * scsi_internal_device_block - internal function to put a device | |
2178 | * temporarily into the SDEV_BLOCK state | |
2179 | * @sdev: device to block | |
2180 | * | |
2181 | * Block request made by SCSI LLDs to temporarily stop all | 
2182 | * scsi commands on the specified device. Called from interrupt | |
2183 | * or normal process context. | |
2184 | * | |
2185 | * Returns zero if successful or error if not | |
2186 | * | |
2187 | * Notes: | |
2188 | * This routine transitions the device to the SDEV_BLOCK state | |
2189 | * (which must be a legal transition). When the device is in this | |
2190 | * state, all commands are deferred until the scsi lld reenables | |
2191 | * the device with scsi_device_unblock or device_block_tmo fires. | |
2192 | * This routine assumes the host_lock is held on entry. | |
2193 | **/ | |
2194 | int | |
2195 | scsi_internal_device_block(struct scsi_device *sdev) | |
2196 | { | |
165125e1 | 2197 | struct request_queue *q = sdev->request_queue; |
1da177e4 LT |
2198 | unsigned long flags; |
2199 | int err = 0; | |
2200 | ||
2201 | err = scsi_device_set_state(sdev, SDEV_BLOCK); | |
2202 | if (err) | |
2203 | return err; | |
2204 | ||
2205 | /* | |
2206 | * The device has transitioned to SDEV_BLOCK. Stop the | |
2207 | * block layer from calling the midlayer with this device's | |
2208 | * request queue. | |
2209 | */ | |
2210 | spin_lock_irqsave(q->queue_lock, flags); | |
2211 | blk_stop_queue(q); | |
2212 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2213 | ||
2214 | return 0; | |
2215 | } | |
2216 | EXPORT_SYMBOL_GPL(scsi_internal_device_block); | |
2217 | ||
2218 | /** | |
2219 | * scsi_internal_device_unblock - resume a device after a block request | |
2220 | * @sdev: device to resume | |
2221 | * | |
2222 | * Called by SCSI LLDs or the midlayer to restart the device queue | 
2223 | * for the previously suspended scsi device. Called from interrupt or | |
2224 | * normal process context. | |
2225 | * | |
2226 | * Returns zero if successful or error if not. | |
2227 | * | |
2228 | * Notes: | |
2229 | * This routine transitions the device to the SDEV_RUNNING state | |
2230 | * (which must be a legal transition) allowing the midlayer to | |
2231 | * goose the queue for this device. This routine assumes the | |
2232 | * host_lock is held upon entry. | |
2233 | **/ | |
2234 | int | |
2235 | scsi_internal_device_unblock(struct scsi_device *sdev) | |
2236 | { | |
165125e1 | 2237 | struct request_queue *q = sdev->request_queue; |
1da177e4 LT |
2238 | int err; |
2239 | unsigned long flags; | |
2240 | ||
2241 | /* | |
2242 | * Try to transition the scsi device to SDEV_RUNNING | |
2243 | * and goose the device queue if successful. | |
2244 | */ | |
2245 | err = scsi_device_set_state(sdev, SDEV_RUNNING); | |
2246 | if (err) | |
2247 | return err; | |
2248 | ||
2249 | spin_lock_irqsave(q->queue_lock, flags); | |
2250 | blk_start_queue(q); | |
2251 | spin_unlock_irqrestore(q->queue_lock, flags); | |
2252 | ||
2253 | return 0; | |
2254 | } | |
2255 | EXPORT_SYMBOL_GPL(scsi_internal_device_unblock); | |
2256 | ||
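A hedged sketch of the block/unblock pairing around an LLD-internal LUN reset; my_lun_reset_* are hypothetical, and per the notes above the block side expects to run under the host lock:

	static void my_lun_reset_begin(struct scsi_device *sdev)
	{
		if (scsi_internal_device_block(sdev))
			sdev_printk(KERN_ERR, sdev, "failed to block device\n");
	}

	static void my_lun_reset_done(struct scsi_device *sdev)
	{
		if (scsi_internal_device_unblock(sdev))
			sdev_printk(KERN_ERR, sdev, "failed to unblock device\n");
	}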
2257 | static void | |
2258 | device_block(struct scsi_device *sdev, void *data) | |
2259 | { | |
2260 | scsi_internal_device_block(sdev); | |
2261 | } | |
2262 | ||
2263 | static int | |
2264 | target_block(struct device *dev, void *data) | |
2265 | { | |
2266 | if (scsi_is_target_device(dev)) | |
2267 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2268 | device_block); | |
2269 | return 0; | |
2270 | } | |
2271 | ||
2272 | void | |
2273 | scsi_target_block(struct device *dev) | |
2274 | { | |
2275 | if (scsi_is_target_device(dev)) | |
2276 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2277 | device_block); | |
2278 | else | |
2279 | device_for_each_child(dev, NULL, target_block); | |
2280 | } | |
2281 | EXPORT_SYMBOL_GPL(scsi_target_block); | |
2282 | ||
2283 | static void | |
2284 | device_unblock(struct scsi_device *sdev, void *data) | |
2285 | { | |
2286 | scsi_internal_device_unblock(sdev); | |
2287 | } | |
2288 | ||
2289 | static int | |
2290 | target_unblock(struct device *dev, void *data) | |
2291 | { | |
2292 | if (scsi_is_target_device(dev)) | |
2293 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2294 | device_unblock); | |
2295 | return 0; | |
2296 | } | |
2297 | ||
2298 | void | |
2299 | scsi_target_unblock(struct device *dev) | |
2300 | { | |
2301 | if (scsi_is_target_device(dev)) | |
2302 | starget_for_each_device(to_scsi_target(dev), NULL, | |
2303 | device_unblock); | |
2304 | else | |
2305 | device_for_each_child(dev, NULL, target_unblock); | |
2306 | } | |
2307 | EXPORT_SYMBOL_GPL(scsi_target_unblock); | |
cdb8c2a6 GL |
2308 | |
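A hedged sketch of transport-class usage: fence every device under a target while its port is temporarily gone, then release them when it returns (the FC transport uses this pattern for remote ports); my_port_* are hypothetical:

	static void my_port_lost(struct scsi_target *starget)
	{
		scsi_target_block(&starget->dev);
	}

	static void my_port_back(struct scsi_target *starget)
	{
		scsi_target_unblock(&starget->dev);
	}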
2309 | /** | |
2310 | * scsi_kmap_atomic_sg - find and atomically map an sg-element | 
2311 | * @sg: scatter-gather list | |
2312 | * @sg_count: number of segments in sg | |
2313 | * @offset: offset in bytes into sg, on return offset into the mapped area | |
2314 | * @len: bytes to map, on return number of bytes mapped | |
2315 | * | |
2316 | * Returns virtual address of the start of the mapped page | |
2317 | */ | |
c6132da1 | 2318 | void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, |
cdb8c2a6 GL |
2319 | size_t *offset, size_t *len) |
2320 | { | |
2321 | int i; | |
2322 | size_t sg_len = 0, len_complete = 0; | |
c6132da1 | 2323 | struct scatterlist *sg; |
cdb8c2a6 GL |
2324 | struct page *page; |
2325 | ||
22cfefb5 AM |
2326 | WARN_ON(!irqs_disabled()); |
2327 | ||
c6132da1 | 2328 | for_each_sg(sgl, sg, sg_count, i) { |
cdb8c2a6 | 2329 | len_complete = sg_len; /* Complete sg-entries */ |
c6132da1 | 2330 | sg_len += sg->length; |
cdb8c2a6 GL |
2331 | if (sg_len > *offset) |
2332 | break; | |
2333 | } | |
2334 | ||
2335 | if (unlikely(i == sg_count)) { | |
169e1a2a AM |
2336 | printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " |
2337 | "elements %d\n", | |
cdb8c2a6 GL |
2338 | __FUNCTION__, sg_len, *offset, sg_count); |
2339 | WARN_ON(1); | |
2340 | return NULL; | |
2341 | } | |
2342 | ||
2343 | /* Offset starting from the beginning of first page in this sg-entry */ | |
c6132da1 | 2344 | *offset = *offset - len_complete + sg->offset; |
cdb8c2a6 GL |
2345 | |
2346 | /* Assumption: contiguous pages can be accessed as "page + i" */ | |
c6132da1 | 2347 | page = nth_page(sg->page, (*offset >> PAGE_SHIFT)); |
cdb8c2a6 GL |
2348 | *offset &= ~PAGE_MASK; |
2349 | ||
2350 | /* Bytes in this sg-entry from *offset to the end of the page */ | |
2351 | sg_len = PAGE_SIZE - *offset; | |
2352 | if (*len > sg_len) | |
2353 | *len = sg_len; | |
2354 | ||
2355 | return kmap_atomic(page, KM_BIO_SRC_IRQ); | |
2356 | } | |
2357 | EXPORT_SYMBOL(scsi_kmap_atomic_sg); | |
2358 | ||
2359 | /** | |
2360 | * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously | |
2361 | * mapped with scsi_kmap_atomic_sg | |
2362 | * @virt: virtual address to be unmapped | |
2363 | */ | |
2364 | void scsi_kunmap_atomic_sg(void *virt) | |
2365 | { | |
2366 | kunmap_atomic(virt, KM_BIO_SRC_IRQ); | |
2367 | } | |
2368 | EXPORT_SYMBOL(scsi_kunmap_atomic_sg); |
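A hedged drain-loop sketch showing the map/unmap pairing: on each pass *offset comes back as the offset into the mapped page and *len as the bytes actually mapped. my_sg_copy_out is hypothetical; interrupts are disabled around each mapping to satisfy the WARN_ON above:

	#include <linux/scatterlist.h>
	#include <linux/string.h>

	static void my_sg_copy_out(struct scatterlist *sgl, int sg_count,
				   u8 *buf, size_t count)
	{
		size_t done = 0;

		while (done < count) {
			size_t offset = done, len = count - done;
			unsigned long flags;
			void *vaddr;

			local_irq_save(flags);
			vaddr = scsi_kmap_atomic_sg(sgl, sg_count, &offset, &len);
			if (!vaddr) {
				local_irq_restore(flags);
				break;		/* ran past the end of the list */
			}
			memcpy(buf + done, vaddr + offset, len);
			scsi_kunmap_atomic_sg(vaddr);
			local_irq_restore(flags);

			done += len;
		}
	}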