/*
 * scsi_lib.c Copyright (C) 1999 Eric Youngdale
 *
 * SCSI queueing library.
 *      Initial versions: Eric Youngdale (eric@andante.org).
 *                        Based upon conversations with large numbers
 *                        of people at Linux Expo.
 */

#include <linux/bio.h>
#include <linux/bitops.h>
#include <linux/blkdev.h>
#include <linux/completion.h>
#include <linux/kernel.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/hardirq.h>
#include <linux/scatterlist.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_driver.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_host.h>

#include "scsi_priv.h"
#include "scsi_logging.h"


#define SG_MEMPOOL_NR           ARRAY_SIZE(scsi_sg_pools)
#define SG_MEMPOOL_SIZE         2

struct scsi_host_sg_pool {
        size_t                  size;
        char                    *name;
        struct kmem_cache       *slab;
        mempool_t               *pool;
};

#define SP(x) { x, "sgpool-" __stringify(x) }
#if (SCSI_MAX_SG_SEGMENTS < 32)
#error SCSI_MAX_SG_SEGMENTS is too small (must be 32 or greater)
#endif
static struct scsi_host_sg_pool scsi_sg_pools[] = {
        SP(8),
        SP(16),
#if (SCSI_MAX_SG_SEGMENTS > 32)
        SP(32),
#if (SCSI_MAX_SG_SEGMENTS > 64)
        SP(64),
#if (SCSI_MAX_SG_SEGMENTS > 128)
        SP(128),
#if (SCSI_MAX_SG_SEGMENTS > 256)
#error SCSI_MAX_SG_SEGMENTS is too large (256 MAX)
#endif
#endif
#endif
#endif
        SP(SCSI_MAX_SG_SEGMENTS)
};
#undef SP
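
/*
 * Illustrative note (not part of the original source): if, for example,
 * SCSI_MAX_SG_SEGMENTS is 128, the SP() table above expands to five pools
 * -- sgpool-8, sgpool-16, sgpool-32, sgpool-64 and sgpool-128 -- and every
 * scatterlist allocation is rounded up to the nearest pool size.
 */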

struct kmem_cache *scsi_sdb_cache;

static void scsi_run_queue(struct request_queue *q);

/*
 * Function:    scsi_unprep_request()
 *
 * Purpose:     Remove all preparation done for a request, including its
 *              associated scsi_cmnd, so that it can be requeued.
 *
 * Arguments:   req     - request to unprepare
 *
 * Lock status: Assumed that no locks are held upon entry.
 *
 * Returns:     Nothing.
 */
static void scsi_unprep_request(struct request *req)
{
        struct scsi_cmnd *cmd = req->special;

        req->cmd_flags &= ~REQ_DONTPREP;
        req->special = NULL;

        scsi_put_command(cmd);
}

/**
 * __scsi_queue_insert - private queue insertion
 * @cmd: The SCSI command being requeued
 * @reason: The reason for the requeue
 * @unbusy: Whether the queue should be unbusied
 *
 * This is a private queue insertion.  The public interface
 * scsi_queue_insert() always assumes the queue should be unbusied
 * because it's always called before the completion.  This function is
 * for a requeue after completion, which should only occur in this
 * file.
 */
static int __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, int unbusy)
{
        struct Scsi_Host *host = cmd->device->host;
        struct scsi_device *device = cmd->device;
        struct scsi_target *starget = scsi_target(device);
        struct request_queue *q = device->request_queue;
        unsigned long flags;

        SCSI_LOG_MLQUEUE(1,
                 printk("Inserting command %p into mlqueue\n", cmd));

        /*
         * Set the appropriate busy bit for the device/host.
         *
         * If the host/device isn't busy, assume that something actually
         * completed, and that we should be able to queue a command now.
         *
         * Note that the prior mid-layer assumption that any host could
         * always queue at least one command is now broken.  The mid-layer
         * will implement a user specifiable stall (see
         * scsi_host.max_host_blocked and scsi_device.max_device_blocked)
         * if a command is requeued with no other commands outstanding
         * either for the device or for the host.
         */
        switch (reason) {
        case SCSI_MLQUEUE_HOST_BUSY:
                host->host_blocked = host->max_host_blocked;
                break;
        case SCSI_MLQUEUE_DEVICE_BUSY:
                device->device_blocked = device->max_device_blocked;
                break;
        case SCSI_MLQUEUE_TARGET_BUSY:
                starget->target_blocked = starget->max_target_blocked;
                break;
        }

        /*
         * Decrement the counters, since these commands are no longer
         * active on the host/device.
         */
        if (unbusy)
                scsi_device_unbusy(device);

        /*
         * Requeue this command.  It will go before all other commands
         * that are already in the queue.
         *
         * NOTE: there is magic here about the way the queue is plugged if
         * we have no outstanding commands.
         *
         * Although we *don't* plug the queue, we call the request
         * function.  The SCSI request function detects the blocked condition
         * and plugs the queue appropriately.
         */
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, cmd->request);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);

        return 0;
}

/*
 * Function:    scsi_queue_insert()
 *
 * Purpose:     Insert a command in the midlevel queue.
 *
 * Arguments:   cmd    - command that we are adding to queue.
 *              reason - why we are inserting command to queue.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     Nothing.
 *
 * Notes:       We do this for one of two cases.  Either the host is busy
 *              and it cannot accept any more commands for the time being,
 *              or the device returned QUEUE_FULL and can accept no more
 *              commands.
 * Notes:       This could be called either from an interrupt context or a
 *              normal process context.
 */
int scsi_queue_insert(struct scsi_cmnd *cmd, int reason)
{
        return __scsi_queue_insert(cmd, reason, 1);
}
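
/*
 * Usage sketch (illustrative, not from the original source): a command
 * bounced by a busy device is pushed back onto the midlevel queue with
 *
 *      scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
 *
 * which is exactly what scsi_softirq_done() below does for the
 * ADD_TO_MLQUEUE disposition.
 */
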
/**
 * scsi_execute - insert request and wait for the result
 * @sdev:       scsi device
 * @cmd:        scsi command
 * @data_direction: data direction
 * @buffer:     data buffer
 * @bufflen:    len of buffer
 * @sense:      optional sense buffer
 * @timeout:    request timeout in jiffies
 * @retries:    number of times to retry request
 * @flags:      flags to be OR'd into the request flags
 * @resid:      optional residual length
 *
 * returns the req->errors value which is the scsi_cmnd result
 * field.
 */
int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
                 int data_direction, void *buffer, unsigned bufflen,
                 unsigned char *sense, int timeout, int retries, int flags,
                 int *resid)
{
        struct request *req;
        int write = (data_direction == DMA_TO_DEVICE);
        int ret = DRIVER_ERROR << 24;

        req = blk_get_request(sdev->request_queue, write, __GFP_WAIT);

        if (bufflen && blk_rq_map_kern(sdev->request_queue, req,
                                        buffer, bufflen, __GFP_WAIT))
                goto out;

        req->cmd_len = COMMAND_SIZE(cmd[0]);
        memcpy(req->cmd, cmd, req->cmd_len);
        req->sense = sense;
        req->sense_len = 0;
        req->retries = retries;
        req->timeout = timeout;
        req->cmd_type = REQ_TYPE_BLOCK_PC;
        req->cmd_flags |= flags | REQ_QUIET | REQ_PREEMPT;

        /*
         * head injection *required* here otherwise quiesce won't work
         */
        blk_execute_rq(req->q, NULL, req, 1);

        /*
         * Some devices (USB mass-storage in particular) may transfer
         * garbage data together with a residue indicating that the data
         * is invalid.  Prevent the garbage from being misinterpreted
         * and prevent security leaks by zeroing out the excess data.
         */
        if (unlikely(req->resid_len > 0 && req->resid_len <= bufflen))
                memset(buffer + (bufflen - req->resid_len), 0, req->resid_len);

        if (resid)
                *resid = req->resid_len;
        ret = req->errors;
 out:
        blk_put_request(req);

        return ret;
}
EXPORT_SYMBOL(scsi_execute);

int scsi_execute_req(struct scsi_device *sdev, const unsigned char *cmd,
                     int data_direction, void *buffer, unsigned bufflen,
                     struct scsi_sense_hdr *sshdr, int timeout, int retries,
                     int *resid)
{
        char *sense = NULL;
        int result;

        if (sshdr) {
                sense = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_NOIO);
                if (!sense)
                        return DRIVER_ERROR << 24;
        }
        result = scsi_execute(sdev, cmd, data_direction, buffer, bufflen,
                              sense, timeout, retries, 0, resid);
        if (sshdr)
                scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, sshdr);

        kfree(sense);
        return result;
}
EXPORT_SYMBOL(scsi_execute_req);

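/*
 * Usage sketch (illustrative only; the variables are local to the
 * example): issuing a TEST UNIT READY and collecting sense data via
 * scsi_execute_req():
 *
 *      struct scsi_sense_hdr sshdr;
 *      unsigned char cmd[6] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
 *      int res = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0,
 *                                 &sshdr, 30 * HZ, 3, NULL);
 */
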
/*
 * Function:    scsi_init_cmd_errh()
 *
 * Purpose:     Initialize cmd fields related to error handling.
 *
 * Arguments:   cmd     - command that is ready to be queued.
 *
 * Notes:       This function has the job of initializing a number of
 *              fields related to error handling.  Typically this will
 *              be called once for each command, as required.
 */
static void scsi_init_cmd_errh(struct scsi_cmnd *cmd)
{
        cmd->serial_number = 0;
        scsi_set_resid(cmd, 0);
        memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE);
        if (cmd->cmd_len == 0)
                cmd->cmd_len = scsi_command_size(cmd->cmnd);
}

void scsi_device_unbusy(struct scsi_device *sdev)
{
        struct Scsi_Host *shost = sdev->host;
        struct scsi_target *starget = scsi_target(sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        shost->host_busy--;
        starget->target_busy--;
        if (unlikely(scsi_host_in_recovery(shost) &&
                     (shost->host_failed || shost->host_eh_scheduled)))
                scsi_eh_wakeup(shost);
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);
        sdev->device_busy--;
        spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags);
}

/*
 * Called for single_lun devices on IO completion. Clear starget_sdev_user,
 * and call blk_run_queue for all the scsi_devices on the target -
 * including current_sdev first.
 *
 * Called with *no* scsi locks held.
 */
static void scsi_single_lun_run(struct scsi_device *current_sdev)
{
        struct Scsi_Host *shost = current_sdev->host;
        struct scsi_device *sdev, *tmp;
        struct scsi_target *starget = scsi_target(current_sdev);
        unsigned long flags;

        spin_lock_irqsave(shost->host_lock, flags);
        starget->starget_sdev_user = NULL;
        spin_unlock_irqrestore(shost->host_lock, flags);

        /*
         * Call blk_run_queue for all LUNs on the target, starting with
         * current_sdev. We race with others (to set starget_sdev_user),
         * but in most cases, we will be first. Ideally, each LU on the
         * target would get some limited time or requests on the target.
         */
        blk_run_queue(current_sdev->request_queue);

        spin_lock_irqsave(shost->host_lock, flags);
        if (starget->starget_sdev_user)
                goto out;
        list_for_each_entry_safe(sdev, tmp, &starget->devices,
                        same_target_siblings) {
                if (sdev == current_sdev)
                        continue;
                if (scsi_device_get(sdev))
                        continue;

                spin_unlock_irqrestore(shost->host_lock, flags);
                blk_run_queue(sdev->request_queue);
                spin_lock_irqsave(shost->host_lock, flags);

                scsi_device_put(sdev);
        }
 out:
        spin_unlock_irqrestore(shost->host_lock, flags);
}

static inline int scsi_device_is_busy(struct scsi_device *sdev)
{
        if (sdev->device_busy >= sdev->queue_depth || sdev->device_blocked)
                return 1;

        return 0;
}

static inline int scsi_target_is_busy(struct scsi_target *starget)
{
        return ((starget->can_queue > 0 &&
                 starget->target_busy >= starget->can_queue) ||
                 starget->target_blocked);
}

static inline int scsi_host_is_busy(struct Scsi_Host *shost)
{
        if ((shost->can_queue > 0 && shost->host_busy >= shost->can_queue) ||
            shost->host_blocked || shost->host_self_blocked)
                return 1;

        return 0;
}

/*
 * Function:    scsi_run_queue()
 *
 * Purpose:     Select a proper request queue to serve next
 *
 * Arguments:   q       - last request's queue
 *
 * Returns:     Nothing
 *
 * Notes:       The previous command was completely finished, start
 *              a new one if possible.
 */
static void scsi_run_queue(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost = sdev->host;
        LIST_HEAD(starved_list);
        unsigned long flags;

        if (scsi_target(sdev)->single_lun)
                scsi_single_lun_run(sdev);

        spin_lock_irqsave(shost->host_lock, flags);
        list_splice_init(&shost->starved_list, &starved_list);

        while (!list_empty(&starved_list)) {
                int flagset;

                /*
                 * As long as shost is accepting commands and we have
                 * starved queues, call blk_run_queue. scsi_request_fn
                 * drops the queue_lock and can add us back to the
                 * starved_list.
                 *
                 * host_lock protects the starved_list and starved_entry.
                 * scsi_request_fn must get the host_lock before checking
                 * or modifying starved_list or starved_entry.
                 */
                if (scsi_host_is_busy(shost))
                        break;

                sdev = list_entry(starved_list.next,
                                  struct scsi_device, starved_entry);
                list_del_init(&sdev->starved_entry);
                if (scsi_target_is_busy(scsi_target(sdev))) {
                        list_move_tail(&sdev->starved_entry,
                                       &shost->starved_list);
                        continue;
                }

                spin_unlock(shost->host_lock);

                spin_lock(sdev->request_queue->queue_lock);
                flagset = test_bit(QUEUE_FLAG_REENTER, &q->queue_flags) &&
                                !test_bit(QUEUE_FLAG_REENTER,
                                        &sdev->request_queue->queue_flags);
                if (flagset)
                        queue_flag_set(QUEUE_FLAG_REENTER, sdev->request_queue);
                __blk_run_queue(sdev->request_queue);
                if (flagset)
                        queue_flag_clear(QUEUE_FLAG_REENTER, sdev->request_queue);
                spin_unlock(sdev->request_queue->queue_lock);

                spin_lock(shost->host_lock);
        }
        /* put any unprocessed entries back */
        list_splice(&starved_list, &shost->starved_list);
        spin_unlock_irqrestore(shost->host_lock, flags);

        blk_run_queue(q);
}

/*
 * Function:    scsi_requeue_command()
 *
 * Purpose:     Handle post-processing of completed commands.
 *
 * Arguments:   q       - queue to operate on
 *              cmd     - command that may need to be requeued.
 *
 * Returns:     Nothing
 *
 * Notes:       After command completion, there may be blocks left
 *              over which weren't finished by the previous command;
 *              this can be for a number of reasons - the main one is
 *              I/O errors in the middle of the request, in which case
 *              we need to request the blocks that come after the bad
 *              sector.
 * Notes:       Upon return, cmd is a stale pointer.
 */
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
        struct request *req = cmd->request;
        unsigned long flags;

        spin_lock_irqsave(q->queue_lock, flags);
        scsi_unprep_request(req);
        blk_requeue_request(q, req);
        spin_unlock_irqrestore(q->queue_lock, flags);

        scsi_run_queue(q);
}

void scsi_next_command(struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;
        struct request_queue *q = sdev->request_queue;

        /* need to hold a reference on the device before we let go of the cmd */
        get_device(&sdev->sdev_gendev);

        scsi_put_command(cmd);
        scsi_run_queue(q);

        /* ok to remove device now */
        put_device(&sdev->sdev_gendev);
}

void scsi_run_host_queues(struct Scsi_Host *shost)
{
        struct scsi_device *sdev;

        shost_for_each_device(sdev, shost)
                scsi_run_queue(sdev->request_queue);
}

static void __scsi_release_buffers(struct scsi_cmnd *, int);

/*
 * Function:    scsi_end_request()
 *
 * Purpose:     Post-processing of completed commands (usually invoked at end
 *              of upper level post-processing and scsi_io_completion).
 *
 * Arguments:   cmd      - command that is complete.
 *              error    - 0 if I/O indicates success, < 0 for I/O error.
 *              bytes    - number of bytes of completed I/O
 *              requeue  - indicates whether we should requeue leftovers.
 *
 * Lock status: Assumed that lock is not held upon entry.
 *
 * Returns:     cmd if requeue required, NULL otherwise.
 *
 * Notes:       This is called for block device requests in order to
 *              mark some number of sectors as complete.
 *
 *              We are guaranteeing that the request queue will be goosed
 *              at some point during this call.
 * Notes:       If cmd was requeued, upon return it will be a stale pointer.
 */
static struct scsi_cmnd *scsi_end_request(struct scsi_cmnd *cmd, int error,
                                          int bytes, int requeue)
{
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;

        /*
         * If there are blocks left over at the end, set up the command
         * to queue the remainder of them.
         */
        if (blk_end_request(req, error, bytes)) {
                /* kill remainder if no retries */
                if (error && scsi_noretry_cmd(cmd))
                        blk_end_request_all(req, error);
                else {
                        if (requeue) {
                                /*
                                 * Bleah.  Leftovers again.  Stick the
                                 * leftovers in the front of the
                                 * queue, and goose the queue again.
                                 */
                                scsi_release_buffers(cmd);
                                scsi_requeue_command(q, cmd);
                                cmd = NULL;
                        }
                        return cmd;
                }
        }

        /*
         * This will goose the queue request function at the end, so we don't
         * need to worry about launching another command.
         */
        __scsi_release_buffers(cmd, 0);
        scsi_next_command(cmd);
        return NULL;
}

static inline unsigned int scsi_sgtable_index(unsigned short nents)
{
        unsigned int index;

        BUG_ON(nents > SCSI_MAX_SG_SEGMENTS);

        if (nents <= 8)
                index = 0;
        else
                index = get_count_order(nents) - 3;

        return index;
}
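
/*
 * Worked example (illustrative): for nents = 24, get_count_order(24) is 5,
 * so the index is 5 - 3 = 2 and the allocation is served from sgpool-32,
 * the smallest pool that can hold 24 entries.
 */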

static void scsi_sg_free(struct scatterlist *sgl, unsigned int nents)
{
        struct scsi_host_sg_pool *sgp;

        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
        mempool_free(sgl, sgp->pool);
}

static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
{
        struct scsi_host_sg_pool *sgp;

        sgp = scsi_sg_pools + scsi_sgtable_index(nents);
        return mempool_alloc(sgp->pool, gfp_mask);
}

static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents,
                              gfp_t gfp_mask)
{
        int ret;

        BUG_ON(!nents);

        ret = __sg_alloc_table(&sdb->table, nents, SCSI_MAX_SG_SEGMENTS,
                               gfp_mask, scsi_sg_alloc);
        if (unlikely(ret))
                __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS,
                                scsi_sg_free);

        return ret;
}

static void scsi_free_sgtable(struct scsi_data_buffer *sdb)
{
        __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, scsi_sg_free);
}

static void __scsi_release_buffers(struct scsi_cmnd *cmd, int do_bidi_check)
{

        if (cmd->sdb.table.nents)
                scsi_free_sgtable(&cmd->sdb);

        memset(&cmd->sdb, 0, sizeof(cmd->sdb));

        if (do_bidi_check && scsi_bidi_cmnd(cmd)) {
                struct scsi_data_buffer *bidi_sdb =
                        cmd->request->next_rq->special;
                scsi_free_sgtable(bidi_sdb);
                kmem_cache_free(scsi_sdb_cache, bidi_sdb);
                cmd->request->next_rq->special = NULL;
        }

        if (scsi_prot_sg_count(cmd))
                scsi_free_sgtable(cmd->prot_sdb);
}

/*
 * Function:    scsi_release_buffers()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd     - command that we are bailing.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       In the event that an upper level driver rejects a
 *              command, we must release resources allocated during
 *              the __init_io() function.  Primarily this would involve
 *              the scatter-gather table, and potentially any bounce
 *              buffers.
 */
void scsi_release_buffers(struct scsi_cmnd *cmd)
{
        __scsi_release_buffers(cmd, 1);
}
EXPORT_SYMBOL(scsi_release_buffers);

/*
 * Function:    scsi_io_completion()
 *
 * Purpose:     Completion processing for block device I/O requests.
 *
 * Arguments:   cmd   - command that is finished.
 *
 * Lock status: Assumed that no lock is held upon entry.
 *
 * Returns:     Nothing
 *
 * Notes:       This function is matched in terms of capabilities to
 *              the function that created the scatter-gather list.
 *              In other words, if there are no bounce buffers
 *              (the normal case for most drivers), we don't need
 *              the logic to deal with cleaning up afterwards.
 *
 *              We must call scsi_end_request().  This will finish off
 *              the specified number of sectors.  If we are done, the
 *              command block will be released and the queue function
 *              will be goosed.  If we are not done then we have to
 *              figure out what to do next:
 *
 *              a) We can call scsi_requeue_command().  The request
 *                 will be unprepared and put back on the queue.  Then
 *                 a new command will be created for it.  This should
 *                 be used if we made forward progress, or if we want
 *                 to switch from READ(10) to READ(6) for example.
 *
 *              b) We can call scsi_queue_insert().  The request will
 *                 be put back on the queue and retried using the same
 *                 command as before, possibly after a delay.
 *
 *              c) We can call blk_end_request() with -EIO to fail
 *                 the remainder of the request.
 */
void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
{
        int result = cmd->result;
        struct request_queue *q = cmd->device->request_queue;
        struct request *req = cmd->request;
        int error = 0;
        struct scsi_sense_hdr sshdr;
        int sense_valid = 0;
        int sense_deferred = 0;
        enum {ACTION_FAIL, ACTION_REPREP, ACTION_RETRY,
              ACTION_DELAYED_RETRY} action;
        char *description = NULL;

        if (result) {
                sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
                if (sense_valid)
                        sense_deferred = scsi_sense_is_deferred(&sshdr);
        }

        if (blk_pc_request(req)) { /* SG_IO ioctl from block level */
                req->errors = result;
                if (result) {
                        if (sense_valid && req->sense) {
                                /*
                                 * SG_IO wants current and deferred errors
                                 */
                                int len = 8 + cmd->sense_buffer[7];

                                if (len > SCSI_SENSE_BUFFERSIZE)
                                        len = SCSI_SENSE_BUFFERSIZE;
                                memcpy(req->sense, cmd->sense_buffer, len);
                                req->sense_len = len;
                        }
                        if (!sense_deferred)
                                error = -EIO;
                }

                req->resid_len = scsi_get_resid(cmd);

                if (scsi_bidi_cmnd(cmd)) {
                        /*
                         * Bidi commands must be completed as a whole,
                         * both sides at once.
                         */
                        req->next_rq->resid_len = scsi_in(cmd)->resid;

                        blk_end_request_all(req, 0);

                        scsi_release_buffers(cmd);
                        scsi_next_command(cmd);
                        return;
                }
        }

        BUG_ON(blk_bidi_rq(req)); /* bidi is not supported for !blk_pc_request yet */

        /*
         * Next deal with any sectors which we were able to correctly
         * handle.
         */
        SCSI_LOG_HLCOMPLETE(1, printk("%u sectors total, "
                                      "%d bytes done.\n",
                                      blk_rq_sectors(req), good_bytes));

        /*
         * Recovered errors need reporting, but they're always treated
         * as success, so fiddle the result code here.  For BLOCK_PC
         * we already took a copy of the original into rq->errors which
         * is what gets returned to the user
         */
        if (sense_valid && sshdr.sense_key == RECOVERED_ERROR) {
                if (!(req->cmd_flags & REQ_QUIET))
                        scsi_print_sense("", cmd);
                result = 0;
                /* BLOCK_PC may have set error */
                error = 0;
        }

        /*
         * A number of bytes were successfully read.  If there
         * are leftovers and there is some kind of error
         * (result != 0), retry the rest.
         */
        if (scsi_end_request(cmd, error, good_bytes, result == 0) == NULL)
                return;

        error = -EIO;

        if (host_byte(result) == DID_RESET) {
                /* Third party bus reset or reset for error recovery
                 * reasons.  Just retry the command and see what
                 * happens.
                 */
                action = ACTION_RETRY;
        } else if (sense_valid && !sense_deferred) {
                switch (sshdr.sense_key) {
                case UNIT_ATTENTION:
                        if (cmd->device->removable) {
                                /* Detected disc change.  Set a bit
                                 * and quietly refuse further access.
                                 */
                                cmd->device->changed = 1;
                                description = "Media Changed";
                                action = ACTION_FAIL;
                        } else {
                                /* Must have been a power glitch, or a
                                 * bus reset.  Could not have been a
                                 * media change, so we just retry the
                                 * command and see what happens.
                                 */
                                action = ACTION_RETRY;
                        }
                        break;
                case ILLEGAL_REQUEST:
                        /* If we had an ILLEGAL REQUEST returned, then
                         * we may have performed an unsupported
                         * command.  The only thing this should be
                         * would be a ten byte read where only a six
                         * byte read was supported.  Also, on a system
                         * where READ CAPACITY failed, we may have
                         * read past the end of the disk.
                         */
                        if ((cmd->device->use_10_for_rw &&
                            sshdr.asc == 0x20 && sshdr.ascq == 0x00) &&
                            (cmd->cmnd[0] == READ_10 ||
                             cmd->cmnd[0] == WRITE_10)) {
                                /* This will issue a new 6-byte command. */
                                cmd->device->use_10_for_rw = 0;
                                action = ACTION_REPREP;
                        } else if (sshdr.asc == 0x10) /* DIX */ {
                                description = "Host Data Integrity Failure";
                                action = ACTION_FAIL;
                                error = -EILSEQ;
                        } else
                                action = ACTION_FAIL;
                        break;
                case ABORTED_COMMAND:
                        action = ACTION_FAIL;
                        if (sshdr.asc == 0x10) { /* DIF */
                                description = "Target Data Integrity Failure";
                                error = -EILSEQ;
                        }
                        break;
                case NOT_READY:
                        /* If the device is in the process of becoming
                         * ready, or has a temporary blockage, retry.
                         */
                        if (sshdr.asc == 0x04) {
                                switch (sshdr.ascq) {
                                case 0x01: /* becoming ready */
                                case 0x04: /* format in progress */
                                case 0x05: /* rebuild in progress */
                                case 0x06: /* recalculation in progress */
                                case 0x07: /* operation in progress */
                                case 0x08: /* Long write in progress */
                                case 0x09: /* self test in progress */
                                        action = ACTION_DELAYED_RETRY;
                                        break;
                                default:
                                        description = "Device not ready";
                                        action = ACTION_FAIL;
                                        break;
                                }
                        } else {
                                description = "Device not ready";
                                action = ACTION_FAIL;
                        }
                        break;
                case VOLUME_OVERFLOW:
                        /* See SSC3rXX or current. */
                        action = ACTION_FAIL;
                        break;
                default:
                        description = "Unhandled sense code";
                        action = ACTION_FAIL;
                        break;
                }
        } else {
                description = "Unhandled error code";
                action = ACTION_FAIL;
        }

        switch (action) {
        case ACTION_FAIL:
                /* Give up and fail the remainder of the request */
                scsi_release_buffers(cmd);
                if (!(req->cmd_flags & REQ_QUIET)) {
                        if (description)
                                scmd_printk(KERN_INFO, cmd, "%s\n",
                                            description);
                        scsi_print_result(cmd);
                        if (driver_byte(result) & DRIVER_SENSE)
                                scsi_print_sense("", cmd);
                        scsi_print_command(cmd);
                }
                if (blk_end_request_err(req, error))
                        scsi_requeue_command(q, cmd);
                else
                        scsi_next_command(cmd);
                break;
        case ACTION_REPREP:
                /* Unprep the request and put it back at the head of the queue.
                 * A new command will be prepared and issued.
                 */
                scsi_release_buffers(cmd);
                scsi_requeue_command(q, cmd);
                break;
        case ACTION_RETRY:
                /* Retry the same command immediately */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, 0);
                break;
        case ACTION_DELAYED_RETRY:
                /* Retry the same command after a delay */
                __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, 0);
                break;
        }
}

static int scsi_init_sgtable(struct request *req, struct scsi_data_buffer *sdb,
                             gfp_t gfp_mask)
{
        int count;

        /*
         * If sg table allocation fails, requeue request later.
         */
        if (unlikely(scsi_alloc_sgtable(sdb, req->nr_phys_segments,
                                        gfp_mask))) {
                return BLKPREP_DEFER;
        }

        req->buffer = NULL;

        /*
         * Next, walk the list, and fill in the addresses and sizes of
         * each segment.
         */
        count = blk_rq_map_sg(req->q, req, sdb->table.sgl);
        BUG_ON(count > sdb->table.nents);
        sdb->table.nents = count;
        sdb->length = blk_rq_bytes(req);
        return BLKPREP_OK;
}

/*
 * Function:    scsi_init_io()
 *
 * Purpose:     SCSI I/O initialize function.
 *
 * Arguments:   cmd   - Command descriptor we wish to initialize
 *
 * Returns:     0 on success
 *              BLKPREP_DEFER if the failure is retryable
 *              BLKPREP_KILL if the failure is fatal
 */
int scsi_init_io(struct scsi_cmnd *cmd, gfp_t gfp_mask)
{
        int error = scsi_init_sgtable(cmd->request, &cmd->sdb, gfp_mask);
        if (error)
                goto err_exit;

        if (blk_bidi_rq(cmd->request)) {
                struct scsi_data_buffer *bidi_sdb = kmem_cache_zalloc(
                        scsi_sdb_cache, GFP_ATOMIC);
                if (!bidi_sdb) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }

                cmd->request->next_rq->special = bidi_sdb;
                error = scsi_init_sgtable(cmd->request->next_rq, bidi_sdb,
                                          GFP_ATOMIC);
                if (error)
                        goto err_exit;
        }

        if (blk_integrity_rq(cmd->request)) {
                struct scsi_data_buffer *prot_sdb = cmd->prot_sdb;
                int ivecs, count;

                BUG_ON(prot_sdb == NULL);
                ivecs = blk_rq_count_integrity_sg(cmd->request);

                if (scsi_alloc_sgtable(prot_sdb, ivecs, gfp_mask)) {
                        error = BLKPREP_DEFER;
                        goto err_exit;
                }

                count = blk_rq_map_integrity_sg(cmd->request,
                                                prot_sdb->table.sgl);
                BUG_ON(unlikely(count > ivecs));

                cmd->prot_sdb = prot_sdb;
                cmd->prot_sdb->table.nents = count;
        }

        return BLKPREP_OK;

err_exit:
        scsi_release_buffers(cmd);
        if (error == BLKPREP_KILL)
                scsi_put_command(cmd);
        else /* BLKPREP_DEFER */
                scsi_unprep_request(cmd->request);

        return error;
}
EXPORT_SYMBOL(scsi_init_io);

static struct scsi_cmnd *scsi_get_cmd_from_req(struct scsi_device *sdev,
                struct request *req)
{
        struct scsi_cmnd *cmd;

        if (!req->special) {
                cmd = scsi_get_command(sdev, GFP_ATOMIC);
                if (unlikely(!cmd))
                        return NULL;
                req->special = cmd;
        } else {
                cmd = req->special;
        }

        /* pull a tag out of the request if we have one */
        cmd->tag = req->tag;
        cmd->request = req;

        cmd->cmnd = req->cmd;

        return cmd;
}

int scsi_setup_blk_pc_cmnd(struct scsi_device *sdev, struct request *req)
{
        struct scsi_cmnd *cmd;
        int ret = scsi_prep_state_check(sdev, req);

        if (ret != BLKPREP_OK)
                return ret;

        cmd = scsi_get_cmd_from_req(sdev, req);
        if (unlikely(!cmd))
                return BLKPREP_DEFER;

        /*
         * BLOCK_PC requests may transfer data, in which case they must
         * have a bio attached to them.  Or they might contain a SCSI
         * command that does not transfer data, in which case they may
         * optionally submit a request without an attached bio.
         */
        if (req->bio) {
                int ret;

                BUG_ON(!req->nr_phys_segments);

                ret = scsi_init_io(cmd, GFP_ATOMIC);
                if (unlikely(ret))
                        return ret;
        } else {
                BUG_ON(blk_rq_bytes(req));

                memset(&cmd->sdb, 0, sizeof(cmd->sdb));
                req->buffer = NULL;
        }

        cmd->cmd_len = req->cmd_len;
        if (!blk_rq_bytes(req))
                cmd->sc_data_direction = DMA_NONE;
        else if (rq_data_dir(req) == WRITE)
                cmd->sc_data_direction = DMA_TO_DEVICE;
        else
                cmd->sc_data_direction = DMA_FROM_DEVICE;

        cmd->transfersize = blk_rq_bytes(req);
        cmd->allowed = req->retries;
        return BLKPREP_OK;
}
EXPORT_SYMBOL(scsi_setup_blk_pc_cmnd);
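
/*
 * Illustrative note: REQ_TYPE_BLOCK_PC requests typically originate from
 * pass-through paths such as the SG_IO ioctl, where the caller supplies
 * the CDB in req->cmd; scsi_prep_fn() below routes them here.
 */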

/*
 * Setup a REQ_TYPE_FS command.  These are simple read/write requests
 * from filesystems that still need to be translated to SCSI CDBs from
 * the ULD.
 */
int scsi_setup_fs_cmnd(struct scsi_device *sdev, struct request *req)
{
        struct scsi_cmnd *cmd;
        int ret = scsi_prep_state_check(sdev, req);

        if (ret != BLKPREP_OK)
                return ret;

        if (unlikely(sdev->scsi_dh_data && sdev->scsi_dh_data->scsi_dh
                         && sdev->scsi_dh_data->scsi_dh->prep_fn)) {
                ret = sdev->scsi_dh_data->scsi_dh->prep_fn(sdev, req);
                if (ret != BLKPREP_OK)
                        return ret;
        }

        /*
         * Filesystem requests must transfer data.
         */
        BUG_ON(!req->nr_phys_segments);

        cmd = scsi_get_cmd_from_req(sdev, req);
        if (unlikely(!cmd))
                return BLKPREP_DEFER;

        memset(cmd->cmnd, 0, BLK_MAX_CDB);
        return scsi_init_io(cmd, GFP_ATOMIC);
}
EXPORT_SYMBOL(scsi_setup_fs_cmnd);

int scsi_prep_state_check(struct scsi_device *sdev, struct request *req)
{
        int ret = BLKPREP_OK;

        /*
         * If the device is not in running state we will reject some
         * or all commands.
         */
        if (unlikely(sdev->sdev_state != SDEV_RUNNING)) {
                switch (sdev->sdev_state) {
                case SDEV_OFFLINE:
                        /*
                         * If the device is offline we refuse to process any
                         * commands.  The device must be brought online
                         * before trying any recovery commands.
                         */
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to offline device\n");
                        ret = BLKPREP_KILL;
                        break;
                case SDEV_DEL:
                        /*
                         * If the device is fully deleted, we refuse to
                         * process any commands as well.
                         */
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to dead device\n");
                        ret = BLKPREP_KILL;
                        break;
                case SDEV_QUIESCE:
                case SDEV_BLOCK:
                case SDEV_CREATED_BLOCK:
                        /*
                         * If the device is blocked we defer normal commands.
                         */
                        if (!(req->cmd_flags & REQ_PREEMPT))
                                ret = BLKPREP_DEFER;
                        break;
                default:
                        /*
                         * For any other not fully online state we only allow
                         * special commands.  In particular any user initiated
                         * command is not allowed.
                         */
                        if (!(req->cmd_flags & REQ_PREEMPT))
                                ret = BLKPREP_KILL;
                        break;
                }
        }
        return ret;
}
EXPORT_SYMBOL(scsi_prep_state_check);

int scsi_prep_return(struct request_queue *q, struct request *req, int ret)
{
        struct scsi_device *sdev = q->queuedata;

        switch (ret) {
        case BLKPREP_KILL:
                req->errors = DID_NO_CONNECT << 16;
                /* release the command and kill it */
                if (req->special) {
                        struct scsi_cmnd *cmd = req->special;
                        scsi_release_buffers(cmd);
                        scsi_put_command(cmd);
                        req->special = NULL;
                }
                break;
        case BLKPREP_DEFER:
                /*
                 * If we defer, blk_peek_request() returns NULL, but the
                 * queue must be restarted, so we plug here if no returning
                 * command will automatically do that.
                 */
                if (sdev->device_busy == 0)
                        blk_plug_device(q);
                break;
        default:
                req->cmd_flags |= REQ_DONTPREP;
        }

        return ret;
}
EXPORT_SYMBOL(scsi_prep_return);

int scsi_prep_fn(struct request_queue *q, struct request *req)
{
        struct scsi_device *sdev = q->queuedata;
        int ret = BLKPREP_KILL;

        if (req->cmd_type == REQ_TYPE_BLOCK_PC)
                ret = scsi_setup_blk_pc_cmnd(sdev, req);
        return scsi_prep_return(q, req, ret);
}
EXPORT_SYMBOL(scsi_prep_fn);
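
/*
 * Sketch of how an upper-level driver chains these helpers from its own
 * prep_fn (illustrative only; uld_prep_fn and the CDB fill-in step are
 * hypothetical, and real ULDs such as sd handle more cases):
 *
 *      static int uld_prep_fn(struct request_queue *q, struct request *req)
 *      {
 *              struct scsi_device *sdev = q->queuedata;
 *              int ret = scsi_setup_fs_cmnd(sdev, req);
 *
 *              if (ret == BLKPREP_OK) {
 *                      ... fill in the CDB for req->special ...
 *              }
 *              return scsi_prep_return(q, req, ret);
 *      }
 */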

/*
 * scsi_dev_queue_ready: if we can send requests to sdev, return 1 else
 * return 0.
 *
 * Called with the queue_lock held.
 */
static inline int scsi_dev_queue_ready(struct request_queue *q,
                                  struct scsi_device *sdev)
{
        if (sdev->device_busy == 0 && sdev->device_blocked) {
                /*
                 * unblock after device_blocked iterates to zero
                 */
                if (--sdev->device_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                   sdev_printk(KERN_INFO, sdev,
                                   "unblocking device at zero depth\n"));
                } else {
                        blk_plug_device(q);
                        return 0;
                }
        }
        if (scsi_device_is_busy(sdev))
                return 0;

        return 1;
}


/*
 * scsi_target_queue_ready: checks whether we can send commands to the target
 * @sdev: scsi device on starget to check.
 *
 * Called with the host lock held.
 */
static inline int scsi_target_queue_ready(struct Scsi_Host *shost,
                                          struct scsi_device *sdev)
{
        struct scsi_target *starget = scsi_target(sdev);

        if (starget->single_lun) {
                if (starget->starget_sdev_user &&
                    starget->starget_sdev_user != sdev)
                        return 0;
                starget->starget_sdev_user = sdev;
        }

        if (starget->target_busy == 0 && starget->target_blocked) {
                /*
                 * unblock after target_blocked iterates to zero
                 */
                if (--starget->target_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget,
                                         "unblocking target at zero depth\n"));
                } else
                        return 0;
        }

        if (scsi_target_is_busy(starget)) {
                if (list_empty(&sdev->starved_entry)) {
                        list_add_tail(&sdev->starved_entry,
                                      &shost->starved_list);
                        return 0;
                }
        }

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);
        return 1;
}

/*
 * scsi_host_queue_ready: if we can send requests to shost, return 1 else
 * return 0. We must end up running the queue again whenever 0 is
 * returned, else IO can hang.
 *
 * Called with host_lock held.
 */
static inline int scsi_host_queue_ready(struct request_queue *q,
                                   struct Scsi_Host *shost,
                                   struct scsi_device *sdev)
{
        if (scsi_host_in_recovery(shost))
                return 0;
        if (shost->host_busy == 0 && shost->host_blocked) {
                /*
                 * unblock after host_blocked iterates to zero
                 */
                if (--shost->host_blocked == 0) {
                        SCSI_LOG_MLQUEUE(3,
                                printk("scsi%d unblocking host at zero depth\n",
                                        shost->host_no));
                } else {
                        return 0;
                }
        }
        if (scsi_host_is_busy(shost)) {
                if (list_empty(&sdev->starved_entry))
                        list_add_tail(&sdev->starved_entry, &shost->starved_list);
                return 0;
        }

        /* We're OK to process the command, so we can't be starved */
        if (!list_empty(&sdev->starved_entry))
                list_del_init(&sdev->starved_entry);

        return 1;
}

/*
 * Busy state exporting function for request stacking drivers.
 *
 * For efficiency, no lock is taken to check the busy state of
 * shost/starget/sdev, since the returned value is not guaranteed and
 * may be changed after request stacking drivers call the function,
 * regardless of taking lock or not.
 *
 * When scsi can't dispatch I/Os anymore and needs to kill I/Os
 * (e.g. !sdev), scsi needs to return 'not busy'.
 * Otherwise, request stacking drivers may hold requests forever.
 */
static int scsi_lld_busy(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        struct scsi_target *starget;

        if (!sdev)
                return 0;

        shost = sdev->host;
        starget = scsi_target(sdev);

        if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) ||
            scsi_target_is_busy(starget) || scsi_device_is_busy(sdev))
                return 1;

        return 0;
}

/*
 * Kill a request for a dead device
 */
static void scsi_kill_request(struct request *req, struct request_queue *q)
{
        struct scsi_cmnd *cmd = req->special;
        struct scsi_device *sdev;
        struct scsi_target *starget;
        struct Scsi_Host *shost;

        blk_start_request(req);

        if (unlikely(cmd == NULL)) {
                printk(KERN_CRIT "impossible request in %s.\n",
                       __func__);
                BUG();
        }

        sdev = cmd->device;
        starget = scsi_target(sdev);
        shost = sdev->host;
        scsi_init_cmd_errh(cmd);
        cmd->result = DID_NO_CONNECT << 16;
        atomic_inc(&cmd->device->iorequest_cnt);

        /*
         * SCSI request completion path will do scsi_device_unbusy(),
         * bump busy counts.  To bump the counters, we need to dance
         * with the locks as normal issue path does.
         */
        sdev->device_busy++;
        spin_unlock(sdev->request_queue->queue_lock);
        spin_lock(shost->host_lock);
        shost->host_busy++;
        starget->target_busy++;
        spin_unlock(shost->host_lock);
        spin_lock(sdev->request_queue->queue_lock);

        blk_complete_request(req);
}

static void scsi_softirq_done(struct request *rq)
{
        struct scsi_cmnd *cmd = rq->special;
        unsigned long wait_for = (cmd->allowed + 1) * rq->timeout;
        int disposition;

        INIT_LIST_HEAD(&cmd->eh_entry);

        /*
         * Set the serial numbers back to zero
         */
        cmd->serial_number = 0;

        atomic_inc(&cmd->device->iodone_cnt);
        if (cmd->result)
                atomic_inc(&cmd->device->ioerr_cnt);

        disposition = scsi_decide_disposition(cmd);
        if (disposition != SUCCESS &&
            time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) {
                sdev_printk(KERN_ERR, cmd->device,
                            "timing out command, waited %lus\n",
                            wait_for/HZ);
                disposition = SUCCESS;
        }

        scsi_log_completion(cmd, disposition);

        switch (disposition) {
        case SUCCESS:
                scsi_finish_command(cmd);
                break;
        case NEEDS_RETRY:
                scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY);
                break;
        case ADD_TO_MLQUEUE:
                scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY);
                break;
        default:
                if (!scsi_eh_scmd_add(cmd, 0))
                        scsi_finish_command(cmd);
        }
}

/*
 * Function:    scsi_request_fn()
 *
 * Purpose:     Main strategy routine for SCSI.
 *
 * Arguments:   q       - Pointer to actual queue.
 *
 * Returns:     Nothing
 *
 * Lock status: IO request lock assumed to be held when called.
 */
static void scsi_request_fn(struct request_queue *q)
{
        struct scsi_device *sdev = q->queuedata;
        struct Scsi_Host *shost;
        struct scsi_cmnd *cmd;
        struct request *req;

        if (!sdev) {
                printk("scsi: killing requests for dead queue\n");
                while ((req = blk_peek_request(q)) != NULL)
                        scsi_kill_request(req, q);
                return;
        }

        if (!get_device(&sdev->sdev_gendev))
                /* We must be tearing the block queue down already */
                return;

        /*
         * To start with, we keep looping until the queue is empty, or until
         * the host is no longer able to accept any more requests.
         */
        shost = sdev->host;
        while (!blk_queue_plugged(q)) {
                int rtn;
                /*
                 * get next queueable request.  We do this early to make sure
                 * that the request is fully prepared even if we cannot
                 * accept it.
                 */
                req = blk_peek_request(q);
                if (!req || !scsi_dev_queue_ready(q, sdev))
                        break;

                if (unlikely(!scsi_device_online(sdev))) {
                        sdev_printk(KERN_ERR, sdev,
                                    "rejecting I/O to offline device\n");
                        scsi_kill_request(req, q);
                        continue;
                }

                /*
                 * Remove the request from the request list.
                 */
                if (!(blk_queue_tagged(q) && !blk_queue_start_tag(q, req)))
                        blk_start_request(req);
                sdev->device_busy++;

                spin_unlock(q->queue_lock);
                cmd = req->special;
                if (unlikely(cmd == NULL)) {
                        printk(KERN_CRIT "impossible request in %s.\n"
                                         "please mail a stack trace to "
                                         "linux-scsi@vger.kernel.org\n",
                                         __func__);
                        blk_dump_rq_flags(req, "foo");
                        BUG();
                }
                spin_lock(shost->host_lock);

                /*
                 * We hit this when the driver is using a host wide
                 * tag map.  For device level tag maps the queue_depth check
                 * in the device ready fn would prevent us from trying
                 * to allocate a tag.  Since the map is a shared host resource
                 * we add the dev to the starved list so it eventually gets
                 * a run when a tag is freed.
                 */
                if (blk_queue_tagged(q) && !blk_rq_tagged(req)) {
                        if (list_empty(&sdev->starved_entry))
                                list_add_tail(&sdev->starved_entry,
                                              &shost->starved_list);
                        goto not_ready;
                }

                if (!scsi_target_queue_ready(shost, sdev))
                        goto not_ready;

                if (!scsi_host_queue_ready(q, shost, sdev))
                        goto not_ready;

                scsi_target(sdev)->target_busy++;
                shost->host_busy++;

                /*
                 * XXX(hch): This is rather suboptimal, scsi_dispatch_cmd will
                 *              take the lock again.
                 */
                spin_unlock_irq(shost->host_lock);

                /*
                 * Finally, initialize any error handling parameters, and set up
                 * the timers for timeouts.
                 */
                scsi_init_cmd_errh(cmd);

                /*
                 * Dispatch the command to the low-level driver.
                 */
                rtn = scsi_dispatch_cmd(cmd);
                spin_lock_irq(q->queue_lock);
                if (rtn) {
                        /* we're refusing the command; because of
                         * the way locks get dropped, we need to
                         * check here if plugging is required */
                        if (sdev->device_busy == 0)
                                blk_plug_device(q);

                        break;
                }
        }

        goto out;

 not_ready:
        spin_unlock_irq(shost->host_lock);

        /*
         * lock q, handle tag, requeue req, and decrement device_busy. We
         * must return with queue_lock held.
         *
         * Decrementing device_busy without checking it is OK, as all such
         * cases (host limits or settings) should run the queue at some
         * later time.
         */
        spin_lock_irq(q->queue_lock);
        blk_requeue_request(q, req);
        sdev->device_busy--;
        if (sdev->device_busy == 0)
                blk_plug_device(q);
 out:
        /* must be careful here...if we trigger the ->remove() function
         * we cannot be holding the q lock */
        spin_unlock_irq(q->queue_lock);
        put_device(&sdev->sdev_gendev);
        spin_lock_irq(q->queue_lock);
}

u64 scsi_calculate_bounce_limit(struct Scsi_Host *shost)
{
        struct device *host_dev;
        u64 bounce_limit = 0xffffffff;

        if (shost->unchecked_isa_dma)
                return BLK_BOUNCE_ISA;
        /*
         * Platforms with virtual-DMA translation
         * hardware have no practical limit.
         */
        if (!PCI_DMA_BUS_IS_PHYS)
                return BLK_BOUNCE_ANY;

        host_dev = scsi_get_device(shost);
        if (host_dev && host_dev->dma_mask)
                bounce_limit = *host_dev->dma_mask;

        return bounce_limit;
}
EXPORT_SYMBOL(scsi_calculate_bounce_limit);

struct request_queue *__scsi_alloc_queue(struct Scsi_Host *shost,
                                         request_fn_proc *request_fn)
{
        struct request_queue *q;
        struct device *dev = shost->shost_gendev.parent;

        q = blk_init_queue(request_fn, NULL);
        if (!q)
                return NULL;

        /*
         * this limit is imposed by hardware restrictions
         */
        blk_queue_max_hw_segments(q, shost->sg_tablesize);
        blk_queue_max_phys_segments(q, SCSI_MAX_SG_CHAIN_SEGMENTS);

        blk_queue_max_sectors(q, shost->max_sectors);
        blk_queue_bounce_limit(q, scsi_calculate_bounce_limit(shost));
        blk_queue_segment_boundary(q, shost->dma_boundary);
        dma_set_seg_boundary(dev, shost->dma_boundary);

        blk_queue_max_segment_size(q, dma_get_max_seg_size(dev));

        /* New queue, no concurrency on queue_flags */
        if (!shost->use_clustering)
                queue_flag_clear_unlocked(QUEUE_FLAG_CLUSTER, q);

        /*
         * set a reasonable default alignment on word boundaries: the
         * host and device may alter it using
         * blk_queue_update_dma_alignment() later.
         */
        blk_queue_dma_alignment(q, 0x03);

        return q;
}
EXPORT_SYMBOL(__scsi_alloc_queue);

struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
{
        struct request_queue *q;

        q = __scsi_alloc_queue(sdev->host, scsi_request_fn);
        if (!q)
                return NULL;

        blk_queue_prep_rq(q, scsi_prep_fn);
        blk_queue_softirq_done(q, scsi_softirq_done);
        blk_queue_rq_timed_out(q, scsi_times_out);
        blk_queue_lld_busy(q, scsi_lld_busy);
        return q;
}

void scsi_free_queue(struct request_queue *q)
{
        blk_cleanup_queue(q);
}

/*
 * Function:    scsi_block_requests()
 *
 * Purpose:     Utility function used by low-level drivers to prevent further
 *              commands from being queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 */
void scsi_block_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 1;
}
EXPORT_SYMBOL(scsi_block_requests);

/*
 * Function:    scsi_unblock_requests()
 *
 * Purpose:     Utility function used by low-level drivers to allow further
 *              commands to be queued to the device.
 *
 * Arguments:   shost       - Host in question
 *
 * Returns:     Nothing
 *
 * Lock status: No locks are assumed held.
 *
 * Notes:       There is no timer nor any other means by which the requests
 *              get unblocked other than the low-level driver calling
 *              scsi_unblock_requests().
 *
 *              This is done as an API function so that changes to the
 *              internals of the scsi mid-layer won't require wholesale
 *              changes to drivers that use this feature.
 */
void scsi_unblock_requests(struct Scsi_Host *shost)
{
        shost->host_self_blocked = 0;
        scsi_run_host_queues(shost);
}
EXPORT_SYMBOL(scsi_unblock_requests);
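
/*
 * Usage sketch (illustrative): a low-level driver's recovery path is
 * expected to bracket adapter resets with this pair:
 *
 *      scsi_block_requests(shost);
 *      ... reset the adapter ...
 *      scsi_unblock_requests(shost);
 */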

int __init scsi_init_queue(void)
{
        int i;

        scsi_sdb_cache = kmem_cache_create("scsi_data_buffer",
                                           sizeof(struct scsi_data_buffer),
                                           0, 0, NULL);
        if (!scsi_sdb_cache) {
                printk(KERN_ERR "SCSI: can't init scsi sdb cache\n");
                return -ENOMEM;
        }

        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                int size = sgp->size * sizeof(struct scatterlist);

                sgp->slab = kmem_cache_create(sgp->name, size, 0,
                                SLAB_HWCACHE_ALIGN, NULL);
                if (!sgp->slab) {
                        printk(KERN_ERR "SCSI: can't init sg slab %s\n",
                                        sgp->name);
                        goto cleanup_sdb;
                }

                sgp->pool = mempool_create_slab_pool(SG_MEMPOOL_SIZE,
                                                     sgp->slab);
                if (!sgp->pool) {
                        printk(KERN_ERR "SCSI: can't init sg mempool %s\n",
                                        sgp->name);
                        goto cleanup_sdb;
                }
        }

        return 0;

cleanup_sdb:
        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                if (sgp->pool)
                        mempool_destroy(sgp->pool);
                if (sgp->slab)
                        kmem_cache_destroy(sgp->slab);
        }
        kmem_cache_destroy(scsi_sdb_cache);

        return -ENOMEM;
}

void scsi_exit_queue(void)
{
        int i;

        kmem_cache_destroy(scsi_sdb_cache);

        for (i = 0; i < SG_MEMPOOL_NR; i++) {
                struct scsi_host_sg_pool *sgp = scsi_sg_pools + i;
                mempool_destroy(sgp->pool);
                kmem_cache_destroy(sgp->slab);
        }
}

/**
 *	scsi_mode_select - issue a mode select
 *	@sdev:	SCSI device to be queried
 *	@pf:	Page format bit (1 == standard, 0 == vendor specific)
 *	@sp:	Save page bit (0 == don't save, 1 == save)
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data:	returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if successful; negative error number or scsi
 *	status on error
 *
 */
int
scsi_mode_select(struct scsi_device *sdev, int pf, int sp, int modepage,
		 unsigned char *buffer, int len, int timeout, int retries,
		 struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[10];
	unsigned char *real_buffer;
	int ret;

	memset(cmd, 0, sizeof(cmd));
	cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0);

	if (sdev->use_10_for_ms) {
		if (len > 65535)
			return -EINVAL;
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 8, buffer, len);
		len += 8;
		real_buffer[0] = 0;
		real_buffer[1] = 0;
		real_buffer[2] = data->medium_type;
		real_buffer[3] = data->device_specific;
		real_buffer[4] = data->longlba ? 0x01 : 0;
		real_buffer[5] = 0;
		real_buffer[6] = data->block_descriptor_length >> 8;
		real_buffer[7] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT_10;
		cmd[7] = len >> 8;
		cmd[8] = len;
	} else {
		if (len > 255 || data->block_descriptor_length > 255 ||
		    data->longlba)
			return -EINVAL;

		real_buffer = kmalloc(4 + len, GFP_KERNEL);
		if (!real_buffer)
			return -ENOMEM;
		memcpy(real_buffer + 4, buffer, len);
		len += 4;
		real_buffer[0] = 0;
		real_buffer[1] = data->medium_type;
		real_buffer[2] = data->device_specific;
		real_buffer[3] = data->block_descriptor_length;

		cmd[0] = MODE_SELECT;
		cmd[4] = len;
	}

	ret = scsi_execute_req(sdev, cmd, DMA_TO_DEVICE, real_buffer, len,
			       sshdr, timeout, retries, NULL);
	kfree(real_buffer);
	return ret;
}
EXPORT_SYMBOL_GPL(scsi_mode_select);
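
/*
 * Usage sketch (illustrative only, not part of the original file):
 * write back a mode page previously fetched with scsi_mode_sense().
 * The helper name and parameters are hypothetical; 'page' must hold
 * the bare mode page, since scsi_mode_select() builds the parameter
 * header itself.
 */
#if 0
static int example_write_mode_page(struct scsi_device *sdev,
				   unsigned char *page, int page_len,
				   struct scsi_mode_data *data)
{
	struct scsi_sense_hdr sshdr;

	data->block_descriptor_length = 0;	/* send no block descriptors */
	return scsi_mode_select(sdev, 1 /* PF: standard format */,
				0 /* SP: don't save */, page[0] & 0x3f,
				page, page_len, 10 * HZ, 3, data, &sshdr);
}
#endif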

/**
 *	scsi_mode_sense - issue a mode sense, falling back from ten to six bytes if necessary.
 *	@sdev:	SCSI device to be queried
 *	@dbd:	set if mode sense will allow block descriptors to be returned
 *	@modepage: mode page being requested
 *	@buffer: request buffer (may not be smaller than eight bytes)
 *	@len:	length of request buffer.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@data:	returns a structure abstracting the mode header data
 *	@sshdr: place to put sense data (or NULL if no sense to be collected).
 *		must be SCSI_SENSE_BUFFERSIZE big.
 *
 *	Returns zero if unsuccessful, or the header offset (either 4
 *	or 8 depending on whether a six or ten byte command was
 *	issued) if successful.
 */
int
scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage,
		unsigned char *buffer, int len, int timeout, int retries,
		struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr)
{
	unsigned char cmd[12];
	int use_10_for_ms;
	int header_length;
	int result;
	struct scsi_sense_hdr my_sshdr;

	memset(data, 0, sizeof(*data));
	memset(&cmd[0], 0, 12);
	cmd[1] = dbd & 0x18;	/* allows DBD and LLBA bits */
	cmd[2] = modepage;

	/* caller might not be interested in sense, but we need it */
	if (!sshdr)
		sshdr = &my_sshdr;

 retry:
	use_10_for_ms = sdev->use_10_for_ms;

	if (use_10_for_ms) {
		if (len < 8)
			len = 8;

		cmd[0] = MODE_SENSE_10;
		cmd[8] = len;
		header_length = 8;
	} else {
		if (len < 4)
			len = 4;

		cmd[0] = MODE_SENSE;
		cmd[4] = len;
		header_length = 4;
	}

	memset(buffer, 0, len);

	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer, len,
				  sshdr, timeout, retries, NULL);

	/* This code looks awful: what it's doing is making sure an
	 * ILLEGAL REQUEST sense return identifies the actual command
	 * byte as the problem.  MODE_SENSE commands can return
	 * ILLEGAL REQUEST if the mode page isn't supported */

	if (use_10_for_ms && !scsi_status_is_good(result) &&
	    (driver_byte(result) & DRIVER_SENSE)) {
		if (scsi_sense_valid(sshdr)) {
			if ((sshdr->sense_key == ILLEGAL_REQUEST) &&
			    (sshdr->asc == 0x20) && (sshdr->ascq == 0)) {
				/*
				 * Invalid command operation code
				 */
				sdev->use_10_for_ms = 0;
				goto retry;
			}
		}
	}

	if (scsi_status_is_good(result)) {
		if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b &&
			     (modepage == 6 || modepage == 8))) {
			/* Initio breakage? */
			header_length = 0;
			data->length = 13;
			data->medium_type = 0;
			data->device_specific = 0;
			data->longlba = 0;
			data->block_descriptor_length = 0;
		} else if (use_10_for_ms) {
			data->length = buffer[0]*256 + buffer[1] + 2;
			data->medium_type = buffer[2];
			data->device_specific = buffer[3];
			data->longlba = buffer[4] & 0x01;
			data->block_descriptor_length = buffer[6]*256
				+ buffer[7];
		} else {
			data->length = buffer[0] + 1;
			data->medium_type = buffer[1];
			data->device_specific = buffer[2];
			data->block_descriptor_length = buffer[3];
		}
		data->header_length = header_length;
	}

	return result;
}
EXPORT_SYMBOL(scsi_mode_sense);
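
/*
 * Usage sketch (illustrative only, not part of the original file):
 * fetch the caching mode page (0x08) and read its WCE bit.  The
 * function name and the timeout/retry values are hypothetical.
 */
#if 0
static int example_read_caching_page(struct scsi_device *sdev)
{
	unsigned char buffer[128];
	struct scsi_mode_data data;
	struct scsi_sense_hdr sshdr;
	int res, off;

	res = scsi_mode_sense(sdev, 0 /* allow block descriptors */,
			      0x08, buffer, sizeof(buffer),
			      10 * HZ, 3, &data, &sshdr);
	if (!scsi_status_is_good(res))
		return res;

	/* the page proper starts after the header and block descriptors */
	off = data.header_length + data.block_descriptor_length;
	sdev_printk(KERN_INFO, sdev, "WCE=%d\n",
		    (buffer[off + 2] >> 2) & 1);
	return 0;
}
#endif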

/**
 *	scsi_test_unit_ready - test if unit is ready
 *	@sdev:	scsi device to change the state of.
 *	@timeout: command timeout
 *	@retries: number of retries before failing
 *	@sshdr_external: Optional pointer to struct scsi_sense_hdr for
 *		returning sense. Make sure that this is cleared before passing
 *		in.
 *
 *	Returns zero if successful or an error if the TUR failed.  For
 *	removable media, a return of NOT_READY or UNIT_ATTENTION is
 *	translated to success, with the ->changed flag updated.
 **/
int
scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries,
		     struct scsi_sense_hdr *sshdr_external)
{
	char cmd[] = {
		TEST_UNIT_READY, 0, 0, 0, 0, 0,
	};
	struct scsi_sense_hdr *sshdr;
	int result;

	if (!sshdr_external)
		sshdr = kzalloc(sizeof(*sshdr), GFP_KERNEL);
	else
		sshdr = sshdr_external;

	/* try to eat the UNIT_ATTENTION if there are enough retries */
	do {
		result = scsi_execute_req(sdev, cmd, DMA_NONE, NULL, 0, sshdr,
					  timeout, retries, NULL);
		if (sdev->removable && scsi_sense_valid(sshdr) &&
		    sshdr->sense_key == UNIT_ATTENTION)
			sdev->changed = 1;
	} while (scsi_sense_valid(sshdr) &&
		 sshdr->sense_key == UNIT_ATTENTION && --retries);

	if (!sshdr)
		/* could not allocate sense buffer, so can't process it */
		return result;

	if (sdev->removable && scsi_sense_valid(sshdr) &&
	    (sshdr->sense_key == UNIT_ATTENTION ||
	     sshdr->sense_key == NOT_READY)) {
		sdev->changed = 1;
		result = 0;
	}
	if (!sshdr_external)
		kfree(sshdr);
	return result;
}
EXPORT_SYMBOL(scsi_test_unit_ready);
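
/*
 * Usage sketch (illustrative only, not part of the original file):
 * poll a removable device for readiness and report a media change.
 * The function name and timeout are hypothetical.
 */
#if 0
static void example_poll_media(struct scsi_device *sdev)
{
	struct scsi_sense_hdr sshdr;

	memset(&sshdr, 0, sizeof(sshdr));	/* must be cleared first */
	if (scsi_test_unit_ready(sdev, 10 * HZ, 3, &sshdr))
		return;		/* unit not ready */
	if (sdev->changed)	/* set above for NOT_READY/UNIT_ATTENTION */
		sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
}
#endif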

/**
 *	scsi_device_set_state - Take the given device through the device state model.
 *	@sdev:	scsi device to change the state of.
 *	@state:	state to change to.
 *
 *	Returns zero if successful or an error if the requested
 *	transition is illegal.
 */
int
scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state)
{
	enum scsi_device_state oldstate = sdev->sdev_state;

	if (state == oldstate)
		return 0;

	switch (state) {
	case SDEV_CREATED:
		switch (oldstate) {
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_RUNNING:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_OFFLINE:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_QUIESCE:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_OFFLINE:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_BLOCK:
		switch (oldstate) {
		case SDEV_RUNNING:
		case SDEV_CREATED_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CREATED_BLOCK:
		switch (oldstate) {
		case SDEV_CREATED:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_CANCEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_QUIESCE:
		case SDEV_OFFLINE:
		case SDEV_BLOCK:
			break;
		default:
			goto illegal;
		}
		break;

	case SDEV_DEL:
		switch (oldstate) {
		case SDEV_CREATED:
		case SDEV_RUNNING:
		case SDEV_OFFLINE:
		case SDEV_CANCEL:
			break;
		default:
			goto illegal;
		}
		break;

	}
	sdev->sdev_state = state;
	return 0;

 illegal:
	SCSI_LOG_ERROR_RECOVERY(1,
				sdev_printk(KERN_ERR, sdev,
					    "Illegal state transition %s->%s\n",
					    scsi_device_state_name(oldstate),
					    scsi_device_state_name(state))
				);
	return -EINVAL;
}
EXPORT_SYMBOL(scsi_device_set_state);
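
/*
 * Usage sketch (illustrative only, not part of the original file):
 * attempt to take a device offline, reporting an illegal transition.
 * The function name is hypothetical.
 */
#if 0
static void example_take_offline(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_OFFLINE))
		sdev_printk(KERN_WARNING, sdev,
			    "cannot go offline from state %s\n",
			    scsi_device_state_name(sdev->sdev_state));
}
#endif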

/**
 * 	scsi_evt_emit - emit a single SCSI device uevent
 *	@sdev: associated SCSI device
 *	@evt: event to emit
 *
 *	Send a single uevent (scsi_event) to the associated scsi_device.
 */
static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt)
{
	int idx = 0;
	char *envp[3];

	switch (evt->evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
		envp[idx++] = "SDEV_MEDIA_CHANGE=1";
		break;

	default:
		/* do nothing */
		break;
	}

	envp[idx++] = NULL;

	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
}

/**
 * 	scsi_evt_thread - send a uevent for each scsi event
 *	@work: work struct for scsi_device
 *
 *	Dispatch queued events to their associated scsi_device kobjects
 *	as uevents.
 */
void scsi_evt_thread(struct work_struct *work)
{
	struct scsi_device *sdev;
	LIST_HEAD(event_list);

	sdev = container_of(work, struct scsi_device, event_work);

	while (1) {
		struct scsi_event *evt;
		struct list_head *this, *tmp;
		unsigned long flags;

		spin_lock_irqsave(&sdev->list_lock, flags);
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);

		if (list_empty(&event_list))
			break;

		list_for_each_safe(this, tmp, &event_list) {
			evt = list_entry(this, struct scsi_event, node);
			list_del(&evt->node);
			scsi_evt_emit(sdev, evt);
			kfree(evt);
		}
	}
}

/**
 * 	sdev_evt_send - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt: event to send
 *
 *	Assert scsi device event asynchronously.
 */
void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt)
{
	unsigned long flags;

#if 0
	/* FIXME: currently this check eliminates all media change events
	 * for polled devices.  Need to update to discriminate between AN
	 * and polled events */
	if (!test_bit(evt->evt_type, sdev->supported_events)) {
		kfree(evt);
		return;
	}
#endif

	spin_lock_irqsave(&sdev->list_lock, flags);
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
}
EXPORT_SYMBOL_GPL(sdev_evt_send);

/**
 * 	sdev_evt_alloc - allocate a new scsi event
 *	@evt_type: type of event to allocate
 *	@gfpflags: GFP flags for allocation
 *
 *	Allocates and returns a new scsi_event.
 */
struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type,
				  gfp_t gfpflags)
{
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
	if (!evt)
		return NULL;

	evt->evt_type = evt_type;
	INIT_LIST_HEAD(&evt->node);

	/* evt_type-specific initialization, if any */
	switch (evt_type) {
	case SDEV_EVT_MEDIA_CHANGE:
	default:
		/* do nothing */
		break;
	}

	return evt;
}
EXPORT_SYMBOL_GPL(sdev_evt_alloc);

/**
 * 	sdev_evt_send_simple - send asserted event to uevent thread
 *	@sdev: scsi_device event occurred on
 *	@evt_type: type of event to send
 *	@gfpflags: GFP flags for allocation
 *
 *	Assert scsi device event asynchronously, given an event type.
 */
void sdev_evt_send_simple(struct scsi_device *sdev,
			  enum scsi_device_event evt_type, gfp_t gfpflags)
{
	struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags);
	if (!evt) {
		sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n",
			    evt_type);
		return;
	}

	sdev_evt_send(sdev, evt);
}
EXPORT_SYMBOL_GPL(sdev_evt_send_simple);
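
/*
 * Usage sketch (illustrative only, not part of the original file):
 * the two-step equivalent of sdev_evt_send_simple(), for callers that
 * want to inspect or extend the event before queueing it.  The
 * function name is hypothetical.
 */
#if 0
static void example_report_media_change(struct scsi_device *sdev)
{
	struct scsi_event *evt = sdev_evt_alloc(SDEV_EVT_MEDIA_CHANGE,
						GFP_ATOMIC);
	if (!evt)
		return;			/* event silently dropped on OOM */
	sdev_evt_send(sdev, evt);	/* uevent emitted from the workqueue */
}
#endif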

/**
 *	scsi_device_quiesce - Block user issued commands.
 *	@sdev:	scsi device to quiesce.
 *
 *	This works by trying to transition to the SDEV_QUIESCE state
 *	(which must be a legal transition).  When the device is in this
 *	state, only special requests will be accepted, all others will
 *	be deferred.  Since special requests may also be requeued requests,
 *	a successful return doesn't guarantee the device will be
 *	totally quiescent.
 *
 *	Must be called with user context, may sleep.
 *
 *	Returns zero if successful, or an error if not.
 */
int
scsi_device_quiesce(struct scsi_device *sdev)
{
	int err = scsi_device_set_state(sdev, SDEV_QUIESCE);
	if (err)
		return err;

	scsi_run_queue(sdev->request_queue);
	while (sdev->device_busy) {
		msleep_interruptible(200);
		scsi_run_queue(sdev->request_queue);
	}
	return 0;
}
EXPORT_SYMBOL(scsi_device_quiesce);

/**
 *	scsi_device_resume - Restart user issued commands to a quiesced device.
 *	@sdev:	scsi device to resume.
 *
 *	Moves the device from quiesced back to running and restarts the
 *	queues.
 *
 *	Must be called with user context, may sleep.
 */
void
scsi_device_resume(struct scsi_device *sdev)
{
	if (scsi_device_set_state(sdev, SDEV_RUNNING))
		return;
	scsi_run_queue(sdev->request_queue);
}
EXPORT_SYMBOL(scsi_device_resume);
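
/*
 * Usage sketch (illustrative only, not part of the original file):
 * the usual quiesce/resume bracket around internally issued commands,
 * e.g. during power management.  The function name is hypothetical.
 */
#if 0
static int example_suspend_prepare(struct scsi_device *sdev)
{
	int err = scsi_device_quiesce(sdev);	/* may sleep */
	if (err)
		return err;
	/* ... issue internal (special) commands to the device ... */
	scsi_device_resume(sdev);
	return 0;
}
#endif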

static void
device_quiesce_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_quiesce(sdev);
}

void
scsi_target_quiesce(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_quiesce_fn);
}
EXPORT_SYMBOL(scsi_target_quiesce);

static void
device_resume_fn(struct scsi_device *sdev, void *data)
{
	scsi_device_resume(sdev);
}

void
scsi_target_resume(struct scsi_target *starget)
{
	starget_for_each_device(starget, NULL, device_resume_fn);
}
EXPORT_SYMBOL(scsi_target_resume);

/**
 * scsi_internal_device_block - internal function to put a device temporarily into the SDEV_BLOCK state
 * @sdev:	device to block
 *
 *	Block requests made by SCSI LLDs to temporarily stop all
 *	scsi commands on the specified device.  Called from interrupt
 *	or normal process context.
 *
 *	Returns zero if successful or error if not
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_BLOCK state
 *	(which must be a legal transition).  When the device is in this
 *	state, all commands are deferred until the scsi lld reenables
 *	the device with scsi_device_unblock or device_block_tmo fires.
 *	This routine assumes the host_lock is held on entry.
 */
int
scsi_internal_device_block(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;
	int err = 0;

	err = scsi_device_set_state(sdev, SDEV_BLOCK);
	if (err) {
		err = scsi_device_set_state(sdev, SDEV_CREATED_BLOCK);

		if (err)
			return err;
	}

	/*
	 * The device has transitioned to SDEV_BLOCK.  Stop the
	 * block layer from calling the midlayer with this device's
	 * request queue.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_block);

/**
 * scsi_internal_device_unblock - resume a device after a block request
 * @sdev:	device to resume
 *
 *	Called by SCSI LLDs or the midlayer to restart the device queue
 *	for the previously suspended scsi device.  Called from interrupt or
 *	normal process context.
 *
 *	Returns zero if successful or error if not.
 *
 *	Notes:
 *	This routine transitions the device to the SDEV_RUNNING state
 *	(which must be a legal transition) allowing the midlayer to
 *	goose the queue for this device.  This routine assumes the
 *	host_lock is held upon entry.
 */
int
scsi_internal_device_unblock(struct scsi_device *sdev)
{
	struct request_queue *q = sdev->request_queue;
	unsigned long flags;

	/*
	 * Try to transition the scsi device to SDEV_RUNNING
	 * and goose the device queue if successful.
	 */
	if (sdev->sdev_state == SDEV_BLOCK)
		sdev->sdev_state = SDEV_RUNNING;
	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
		sdev->sdev_state = SDEV_CREATED;
	else
		return -EINVAL;

	spin_lock_irqsave(q->queue_lock, flags);
	blk_start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);

	return 0;
}
EXPORT_SYMBOL_GPL(scsi_internal_device_unblock);

static void
device_block(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_block(sdev);
}

static int
target_block(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	return 0;
}

void
scsi_target_block(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_block);
	else
		device_for_each_child(dev, NULL, target_block);
}
EXPORT_SYMBOL_GPL(scsi_target_block);

static void
device_unblock(struct scsi_device *sdev, void *data)
{
	scsi_internal_device_unblock(sdev);
}

static int
target_unblock(struct device *dev, void *data)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	return 0;
}

void
scsi_target_unblock(struct device *dev)
{
	if (scsi_is_target_device(dev))
		starget_for_each_device(to_scsi_target(dev), NULL,
					device_unblock);
	else
		device_for_each_child(dev, NULL, target_unblock);
}
EXPORT_SYMBOL_GPL(scsi_target_unblock);
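
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * transport class blocks a target while a lost port is re-validated
 * and unblocks it once it returns.  The function name is hypothetical.
 */
#if 0
static void example_port_bounce(struct device *target_dev)
{
	scsi_target_block(target_dev);	/* defer commands, stop the queues */
	/* ... wait for the transport to confirm the port came back ... */
	scsi_target_unblock(target_dev);	/* restart the queues */
}
#endif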

/**
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
 * @sgl:	scatter-gather list
 * @sg_count:	number of segments in sg
 * @offset:	offset in bytes into sg, on return offset into the mapped area
 * @len:	bytes to map, on return number of bytes mapped
 *
 * Returns virtual address of the start of the mapped page
 */
void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count,
			  size_t *offset, size_t *len)
{
	int i;
	size_t sg_len = 0, len_complete = 0;
	struct scatterlist *sg;
	struct page *page;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, sg_count, i) {
		len_complete = sg_len; /* Complete sg-entries */
		sg_len += sg->length;
		if (sg_len > *offset)
			break;
	}

	if (unlikely(i == sg_count)) {
		printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, "
			"elements %d\n",
		       __func__, sg_len, *offset, sg_count);
		WARN_ON(1);
		return NULL;
	}

	/* Offset starting from the beginning of first page in this sg-entry */
	*offset = *offset - len_complete + sg->offset;

	/* Assumption: contiguous pages can be accessed as "page + i" */
	page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT));
	*offset &= ~PAGE_MASK;

	/* Bytes in this sg-entry from *offset to the end of the page */
	sg_len = PAGE_SIZE - *offset;
	if (*len > sg_len)
		*len = sg_len;

	return kmap_atomic(page, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kmap_atomic_sg);

/**
 * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg
 * @virt:	virtual address to be unmapped
 */
void scsi_kunmap_atomic_sg(void *virt)
{
	kunmap_atomic(virt, KM_BIO_SRC_IRQ);
}
EXPORT_SYMBOL(scsi_kunmap_atomic_sg);
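
/*
 * Usage sketch (illustrative only, not part of the original file):
 * peek at the start of a command's data buffer with the atomic kmap
 * helpers.  Interrupts must be disabled; the function name is
 * hypothetical.
 */
#if 0
static void example_peek_buffer(struct scsi_cmnd *cmd, u8 *dst, size_t count)
{
	size_t offset = 0, len = count;
	char *vaddr;

	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
				    &offset, &len);
	if (!vaddr)
		return;
	/* on return, offset is relative to the mapped page, len <= count */
	memcpy(dst, vaddr + offset, len);
	scsi_kunmap_atomic_sg(vaddr);
}
#endif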