]>
Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * scsi_error.c Copyright (C) 1997 Eric Youngdale | |
3 | * | |
4 | * SCSI error/timeout handling | |
5 | * Initial versions: Eric Youngdale. Based upon conversations with | |
6 | * Leonard Zubkoff and David Miller at Linux Expo, | |
7 | * ideas originating from all over the place. | |
8 | * | |
9 | * Restructured scsi_unjam_host and associated functions. | |
10 | * September 04, 2002 Mike Anderson (andmike@us.ibm.com) | |
11 | * | |
12 | * Forward port of Russell King's (rmk@arm.linux.org.uk) changes and | |
13 | * minor cleanups. | |
14 | * September 30, 2002 Mike Anderson (andmike@us.ibm.com) | |
15 | */ | |
16 | ||
17 | #include <linux/module.h> | |
18 | #include <linux/sched.h> | |
19 | #include <linux/timer.h> | |
20 | #include <linux/string.h> | |
21 | #include <linux/slab.h> | |
22 | #include <linux/kernel.h> | |
c5478def | 23 | #include <linux/kthread.h> |
1da177e4 LT |
24 | #include <linux/interrupt.h> |
25 | #include <linux/blkdev.h> | |
26 | #include <linux/delay.h> | |
27 | ||
28 | #include <scsi/scsi.h> | |
29 | #include <scsi/scsi_dbg.h> | |
30 | #include <scsi/scsi_device.h> | |
31 | #include <scsi/scsi_eh.h> | |
32 | #include <scsi/scsi_host.h> | |
33 | #include <scsi/scsi_ioctl.h> | |
34 | #include <scsi/scsi_request.h> | |
35 | ||
36 | #include "scsi_priv.h" | |
37 | #include "scsi_logging.h" | |
38 | ||
39 | #define SENSE_TIMEOUT (10*HZ) | |
40 | #define START_UNIT_TIMEOUT (30*HZ) | |
41 | ||
42 | /* | |
43 | * These should *probably* be handled by the host itself. | |
44 | * Since it is allowed to sleep, it probably should. | |
45 | */ | |
46 | #define BUS_RESET_SETTLE_TIME (10) | |
47 | #define HOST_RESET_SETTLE_TIME (10) | |
48 | ||
49 | /* called with shost->host_lock held */ | |
50 | void scsi_eh_wakeup(struct Scsi_Host *shost) | |
51 | { | |
52 | if (shost->host_busy == shost->host_failed) { | |
3ed7a470 | 53 | wake_up_process(shost->ehandler); |
1da177e4 LT |
54 | SCSI_LOG_ERROR_RECOVERY(5, |
55 | printk("Waking error handler thread\n")); | |
56 | } | |
57 | } | |
58 | ||
/**
 * scsi_eh_scmd_add - add scsi cmd to error handling.
 * @scmd:	scmd to run eh on.
 * @eh_flag:	optional SCSI_EH flag (OR-ed into scmd->eh_eflags).
 *
 * Description:
 *    Queues @scmd on the host's eh_cmd_q, bumps host_failed and wakes
 *    the error handler if every outstanding command has now failed.
 *    The host is moved to SHOST_RECOVERY (or SHOST_CANCEL_RECOVERY if
 *    it is being cancelled) before the command is queued.
 *
 * Return value:
 *	0 on failure (no eh thread, or the host state cannot legally
 *	transition into a recovery state); 1 on success.
 **/
int scsi_eh_scmd_add(struct scsi_cmnd *scmd, int eh_flag)
{
	struct Scsi_Host *shost = scmd->device->host;
	unsigned long flags;
	int ret = 0;

	/* no error handler thread, nothing to hand the command to */
	if (!shost->ehandler)
		return 0;

	spin_lock_irqsave(shost->host_lock, flags);
	/*
	 * Try the normal recovery state first; if the host is mid-
	 * cancellation that transition is illegal, so fall back to
	 * SHOST_CANCEL_RECOVERY.  If neither is reachable, give up.
	 */
	if (scsi_host_set_state(shost, SHOST_RECOVERY))
		if (scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY))
			goto out_unlock;

	ret = 1;
	scmd->eh_eflags |= eh_flag;
	list_add_tail(&scmd->eh_entry, &shost->eh_cmd_q);
	shost->host_failed++;
	/* wakes the eh thread only once host_busy == host_failed */
	scsi_eh_wakeup(shost);
 out_unlock:
	spin_unlock_irqrestore(shost->host_lock, flags);
	return ret;
}
90 | ||
91 | /** | |
92 | * scsi_add_timer - Start timeout timer for a single scsi command. | |
93 | * @scmd: scsi command that is about to start running. | |
94 | * @timeout: amount of time to allow this command to run. | |
95 | * @complete: timeout function to call if timer isn't canceled. | |
96 | * | |
97 | * Notes: | |
98 | * This should be turned into an inline function. Each scsi command | |
99 | * has its own timer, and as it is added to the queue, we set up the | |
100 | * timer. When the command completes, we cancel the timer. | |
101 | **/ | |
102 | void scsi_add_timer(struct scsi_cmnd *scmd, int timeout, | |
103 | void (*complete)(struct scsi_cmnd *)) | |
104 | { | |
105 | ||
106 | /* | |
107 | * If the clock was already running for this command, then | |
108 | * first delete the timer. The timer handling code gets rather | |
109 | * confused if we don't do this. | |
110 | */ | |
111 | if (scmd->eh_timeout.function) | |
112 | del_timer(&scmd->eh_timeout); | |
113 | ||
114 | scmd->eh_timeout.data = (unsigned long)scmd; | |
115 | scmd->eh_timeout.expires = jiffies + timeout; | |
116 | scmd->eh_timeout.function = (void (*)(unsigned long)) complete; | |
117 | ||
118 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p, time:" | |
119 | " %d, (%p)\n", __FUNCTION__, | |
120 | scmd, timeout, complete)); | |
121 | ||
122 | add_timer(&scmd->eh_timeout); | |
123 | } | |
1da177e4 LT |
124 | |
125 | /** | |
126 | * scsi_delete_timer - Delete/cancel timer for a given function. | |
127 | * @scmd: Cmd that we are canceling timer for | |
128 | * | |
129 | * Notes: | |
130 | * This should be turned into an inline function. | |
131 | * | |
132 | * Return value: | |
133 | * 1 if we were able to detach the timer. 0 if we blew it, and the | |
134 | * timer function has already started to run. | |
135 | **/ | |
136 | int scsi_delete_timer(struct scsi_cmnd *scmd) | |
137 | { | |
138 | int rtn; | |
139 | ||
140 | rtn = del_timer(&scmd->eh_timeout); | |
141 | ||
142 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: scmd: %p," | |
143 | " rtn: %d\n", __FUNCTION__, | |
144 | scmd, rtn)); | |
145 | ||
146 | scmd->eh_timeout.data = (unsigned long)NULL; | |
147 | scmd->eh_timeout.function = NULL; | |
148 | ||
149 | return rtn; | |
150 | } | |
1da177e4 LT |
151 | |
/**
 * scsi_times_out - Timeout function for normal scsi commands.
 * @scmd:	Cmd that is timing out.
 *
 * Notes:
 *     We do not need to lock this.  There is the potential for a race
 *     only in that the normal completion handling might run, but if the
 *     normal completion function determines that the timer has already
 *     fired, then it mustn't do anything.
 *
 *     If the LLD provides an eh_timed_out hook, it is consulted first
 *     and may complete the command, re-arm the timer, or decline.
 **/
void scsi_times_out(struct scsi_cmnd *scmd)
{
	scsi_log_completion(scmd, TIMEOUT_ERROR);

	if (scmd->device->host->hostt->eh_timed_out)
		switch (scmd->device->host->hostt->eh_timed_out(scmd)) {
		case EH_HANDLED:
			/* LLD resolved it; complete the command now */
			__scsi_done(scmd);
			return;
		case EH_RESET_TIMER:
			/* This allows a single retry even of a command
			 * with allowed == 0 */
			if (scmd->retries++ > scmd->allowed)
				break;
			/* give the command another full timeout period */
			scsi_add_timer(scmd, scmd->timeout_per_command,
				       scsi_times_out);
			return;
		case EH_NOT_HANDLED:
			break;
		}

	/*
	 * Hand the command to the error handler; if that is impossible
	 * (no eh thread / bad host state), fail it with DID_TIME_OUT.
	 */
	if (unlikely(!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) {
		scmd->result |= DID_TIME_OUT << 16;
		__scsi_done(scmd);
	}
}
188 | ||
189 | /** | |
190 | * scsi_block_when_processing_errors - Prevent cmds from being queued. | |
191 | * @sdev: Device on which we are performing recovery. | |
192 | * | |
193 | * Description: | |
194 | * We block until the host is out of error recovery, and then check to | |
195 | * see whether the host or the device is offline. | |
196 | * | |
197 | * Return value: | |
198 | * 0 when dev was taken offline by error recovery. 1 OK to proceed. | |
199 | **/ | |
200 | int scsi_block_when_processing_errors(struct scsi_device *sdev) | |
201 | { | |
202 | int online; | |
203 | ||
939647ee | 204 | wait_event(sdev->host->host_wait, !scsi_host_in_recovery(sdev->host)); |
1da177e4 LT |
205 | |
206 | online = scsi_device_online(sdev); | |
207 | ||
208 | SCSI_LOG_ERROR_RECOVERY(5, printk("%s: rtn: %d\n", __FUNCTION__, | |
209 | online)); | |
210 | ||
211 | return online; | |
212 | } | |
213 | EXPORT_SYMBOL(scsi_block_when_processing_errors); | |
214 | ||
#ifdef CONFIG_SCSI_LOGGING
/**
 * scsi_eh_prt_fail_stats - Log info on failures.
 * @shost:	scsi host being recovered.
 * @work_q:	Queue of scsi cmds to process.
 *
 * Walks every device on the host and counts, per device, how many of
 * the queued eh commands timed out (cancel) versus simply failed,
 * logging a per-device line and a final total.
 **/
static inline void scsi_eh_prt_fail_stats(struct Scsi_Host *shost,
					  struct list_head *work_q)
{
	struct scsi_cmnd *scmd;
	struct scsi_device *sdev;
	int total = 0;
	int nfail = 0;
	int ncancel = 0;
	int ndev = 0;

	shost_for_each_device(sdev, shost) {
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (scmd->device != sdev)
				continue;
			++total;
			if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD)
				++ncancel;
			else
				++nfail;
		}

		if (ncancel || nfail) {
			SCSI_LOG_ERROR_RECOVERY(3,
				sdev_printk(KERN_INFO, sdev,
					    "%s: cmds failed: %d, cancel: %d\n",
					    __FUNCTION__, nfail,
					    ncancel));
			/* reset the per-device counters for the next sdev */
			ncancel = 0;
			nfail = 0;
			++ndev;
		}
	}

	SCSI_LOG_ERROR_RECOVERY(2, printk("Total of %d commands on %d"
					  " devices require eh work\n",
					  total, ndev));
}
#endif
259 | ||
/**
 * scsi_check_sense - Examine scsi cmd sense
 * @scmd:	Cmd to have sense checked.
 *
 * Return value:
 * 	SUCCESS or FAILED or NEEDS_RETRY
 *
 * Notes:
 *	When a deferred error is detected the current command has
 *	not been executed and needs retrying.
 **/
static int scsi_check_sense(struct scsi_cmnd *scmd)
{
	struct scsi_sense_hdr sshdr;

	if (! scsi_command_normalize_sense(scmd, &sshdr))
		return FAILED;	/* no valid sense data */

	if (scsi_sense_is_deferred(&sshdr))
		return NEEDS_RETRY;

	/*
	 * Previous logic looked for FILEMARK, EOM or ILI which are
	 * mainly associated with tapes and returned SUCCESS.
	 */
	if (sshdr.response_code == 0x70) {
		/* fixed format: FILEMARK/EOM/ILI live in byte 2, bits 5-7 */
		if (scmd->sense_buffer[2] & 0xe0)
			return SUCCESS;
	} else {
		/*
		 * descriptor format: look for "stream commands sense data
		 * descriptor" (see SSC-3). Assume single sense data
		 * descriptor. Ignore ILI from SBC-2 READ LONG and WRITE LONG.
		 */
		if ((sshdr.additional_length > 3) &&
		    (scmd->sense_buffer[8] == 0x4) &&
		    (scmd->sense_buffer[11] & 0xe0))
			return SUCCESS;
	}

	switch (sshdr.sense_key) {
	case NO_SENSE:
		return SUCCESS;
	case RECOVERED_ERROR:
		/* device fixed it up itself; treat as success */
		return /* soft_error */ SUCCESS;

	case ABORTED_COMMAND:
		return NEEDS_RETRY;
	case NOT_READY:
	case UNIT_ATTENTION:
		/*
		 * if we are expecting a cc/ua because of a bus reset that we
		 * performed, treat this just as a retry.  otherwise this is
		 * information that we should pass up to the upper-level driver
		 * so that we can deal with it there.
		 */
		if (scmd->device->expecting_cc_ua) {
			scmd->device->expecting_cc_ua = 0;
			return NEEDS_RETRY;
		}
		/*
		 * if the device is in the process of becoming ready, we
		 * should retry.  (asc/ascq 04/01: "in process of becoming
		 * ready")
		 */
		if ((sshdr.asc == 0x04) && (sshdr.ascq == 0x01))
			return NEEDS_RETRY;
		/*
		 * if the device is not started, we need to wake
		 * the error handler to start the motor
		 * (asc/ascq 04/02: "initializing command required")
		 */
		if (scmd->device->allow_restart &&
		    (sshdr.asc == 0x04) && (sshdr.ascq == 0x02))
			return FAILED;
		return SUCCESS;

		/* these three are not supported */
	case COPY_ABORTED:
	case VOLUME_OVERFLOW:
	case MISCOMPARE:
		return SUCCESS;

	case MEDIUM_ERROR:
		return NEEDS_RETRY;

	case HARDWARE_ERROR:
		/* per-device policy: some devices recover from hw errors */
		if (scmd->device->retry_hwerror)
			return NEEDS_RETRY;
		else
			return SUCCESS;

	case ILLEGAL_REQUEST:
	case BLANK_CHECK:
	case DATA_PROTECT:
	default:
		return SUCCESS;
	}
}
358 | ||
359 | /** | |
360 | * scsi_eh_completed_normally - Disposition a eh cmd on return from LLD. | |
361 | * @scmd: SCSI cmd to examine. | |
362 | * | |
363 | * Notes: | |
364 | * This is *only* called when we are examining the status of commands | |
365 | * queued during error recovery. the main difference here is that we | |
366 | * don't allow for the possibility of retries here, and we are a lot | |
367 | * more restrictive about what we consider acceptable. | |
368 | **/ | |
369 | static int scsi_eh_completed_normally(struct scsi_cmnd *scmd) | |
370 | { | |
371 | /* | |
372 | * first check the host byte, to see if there is anything in there | |
373 | * that would indicate what we need to do. | |
374 | */ | |
375 | if (host_byte(scmd->result) == DID_RESET) { | |
376 | /* | |
377 | * rats. we are already in the error handler, so we now | |
378 | * get to try and figure out what to do next. if the sense | |
379 | * is valid, we have a pretty good idea of what to do. | |
380 | * if not, we mark it as FAILED. | |
381 | */ | |
382 | return scsi_check_sense(scmd); | |
383 | } | |
384 | if (host_byte(scmd->result) != DID_OK) | |
385 | return FAILED; | |
386 | ||
387 | /* | |
388 | * next, check the message byte. | |
389 | */ | |
390 | if (msg_byte(scmd->result) != COMMAND_COMPLETE) | |
391 | return FAILED; | |
392 | ||
393 | /* | |
394 | * now, check the status byte to see if this indicates | |
395 | * anything special. | |
396 | */ | |
397 | switch (status_byte(scmd->result)) { | |
398 | case GOOD: | |
399 | case COMMAND_TERMINATED: | |
400 | return SUCCESS; | |
401 | case CHECK_CONDITION: | |
402 | return scsi_check_sense(scmd); | |
403 | case CONDITION_GOOD: | |
404 | case INTERMEDIATE_GOOD: | |
405 | case INTERMEDIATE_C_GOOD: | |
406 | /* | |
407 | * who knows? FIXME(eric) | |
408 | */ | |
409 | return SUCCESS; | |
410 | case BUSY: | |
411 | case QUEUE_FULL: | |
412 | case RESERVATION_CONFLICT: | |
413 | default: | |
414 | return FAILED; | |
415 | } | |
416 | return FAILED; | |
417 | } | |
418 | ||
/**
 * scsi_eh_times_out - timeout function for error handling.
 * @scmd:	Cmd that is timing out.
 *
 * Notes:
 *    During error handling, the kernel thread will be sleeping waiting
 *    for some action to complete on the device.  our only job is to
 *    record that it timed out, and to wake up the thread.
 **/
static void scsi_eh_times_out(struct scsi_cmnd *scmd)
{
	/* flag must be set before up() so the sleeping eh thread sees it */
	scmd->eh_eflags |= SCSI_EH_REC_TIMEOUT;
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd:%p\n", __FUNCTION__,
					  scmd));

	/* wake the eh thread blocked in scsi_send_eh_cmnd's down() */
	up(scmd->device->host->eh_action);
}
436 | ||
/**
 * scsi_eh_done - Completion function for error handling.
 * @scmd:	Cmd that is done.
 **/
static void scsi_eh_done(struct scsi_cmnd *scmd)
{
	/*
	 * Only act if we win the race against the timeout handler:
	 * del_timer() returns nonzero when the timer was still pending.
	 * If the timeout handler is already running we have no way of
	 * stopping it, so we must always defer to it and do nothing
	 * here — it will wake the eh thread itself.
	 */
	if (del_timer(&scmd->eh_timeout)) {
		scmd->request->rq_status = RQ_SCSI_DONE;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s scmd: %p result: %x\n",
					   __FUNCTION__, scmd, scmd->result));

		/* wake the eh thread blocked in scsi_send_eh_cmnd's down() */
		up(scmd->device->host->eh_action);
	}
}
458 | ||
/**
 * scsi_send_eh_cmnd  - send a cmd to a device as part of error recovery.
 * @scmd:	SCSI Cmd to send.
 * @timeout:	Timeout for cmd (in jiffies).
 *
 * Notes:
 *    The initialization of the structures is quite a bit different in
 *    this case, and furthermore, there is a different completion handler
 *    vs scsi_dispatch_cmd.  The command is queued directly through the
 *    host's queuecommand hook and the caller sleeps on a semaphore until
 *    either scsi_eh_done or scsi_eh_times_out wakes it.
 * Return value:
 *    SUCCESS or FAILED or NEEDS_RETRY
 **/
static int scsi_send_eh_cmnd(struct scsi_cmnd *scmd, int timeout)
{
	struct scsi_device *sdev = scmd->device;
	struct Scsi_Host *shost = sdev->host;
	DECLARE_MUTEX_LOCKED(sem);
	unsigned long flags;
	int rtn = SUCCESS;

	/*
	 * we will use a queued command if possible, otherwise we will
	 * emulate the queuing and calling of completion function ourselves.
	 */
	/* pre-SCSI-3 devices encode the LUN in CDB byte 1, bits 5-7 */
	if (sdev->scsi_level <= SCSI_2)
		scmd->cmnd[1] = (scmd->cmnd[1] & 0x1f) |
			(sdev->lun << 5 & 0xe0);

	scsi_add_timer(scmd, timeout, scsi_eh_times_out);

	/*
	 * set up the semaphore so we wait for the command to complete.
	 * eh_action must be published before queuecommand, since both
	 * completion paths up() it.
	 */
	shost->eh_action = &sem;
	scmd->request->rq_status = RQ_SCSI_BUSY;

	spin_lock_irqsave(shost->host_lock, flags);
	scsi_log_send(scmd);
	shost->hostt->queuecommand(scmd, scsi_eh_done);
	spin_unlock_irqrestore(shost->host_lock, flags);

	/* sleep until scsi_eh_done or scsi_eh_times_out fires */
	down(&sem);
	scsi_log_completion(scmd, SUCCESS);

	shost->eh_action = NULL;

	/*
	 * see if timeout.  if so, tell the host to forget about it.
	 * in other words, we don't want a callback any more.
	 */
	if (scmd->eh_eflags & SCSI_EH_REC_TIMEOUT) {
		scmd->eh_eflags &= ~SCSI_EH_REC_TIMEOUT;

		/*
		 * as far as the low level driver is
		 * concerned, this command is still active, so
		 * we must give the low level driver a chance
		 * to abort it. (db)
		 *
		 * FIXME(eric) - we are not tracking whether we could
		 * abort a timed out command or not.  not sure how
		 * we should treat them differently anyways.
		 */
		if (shost->hostt->eh_abort_handler)
			shost->hostt->eh_abort_handler(scmd);

		scmd->request->rq_status = RQ_SCSI_DONE;
		rtn = FAILED;
	}

	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd: %p, rtn:%x\n",
					  __FUNCTION__, scmd, rtn));

	/*
	 * now examine the actual status codes to see whether the command
	 * actually did complete normally.
	 */
	if (rtn == SUCCESS) {
		rtn = scsi_eh_completed_normally(scmd);
		SCSI_LOG_ERROR_RECOVERY(3,
			printk("%s: scsi_eh_completed_normally %x\n",
			       __FUNCTION__, rtn));
		/* clamp anything unexpected down to FAILED */
		switch (rtn) {
		case SUCCESS:
		case NEEDS_RETRY:
		case FAILED:
			break;
		default:
			rtn = FAILED;
			break;
		}
	}

	return rtn;
}
554 | ||
555 | /** | |
556 | * scsi_request_sense - Request sense data from a particular target. | |
557 | * @scmd: SCSI cmd for request sense. | |
558 | * | |
559 | * Notes: | |
560 | * Some hosts automatically obtain this information, others require | |
561 | * that we obtain it on our own. This function will *not* return until | |
562 | * the command either times out, or it completes. | |
563 | **/ | |
564 | static int scsi_request_sense(struct scsi_cmnd *scmd) | |
565 | { | |
566 | static unsigned char generic_sense[6] = | |
567 | {REQUEST_SENSE, 0, 0, 0, 252, 0}; | |
568 | unsigned char *scsi_result; | |
569 | int saved_result; | |
570 | int rtn; | |
571 | ||
572 | memcpy(scmd->cmnd, generic_sense, sizeof(generic_sense)); | |
573 | ||
bc86120a | 574 | scsi_result = kmalloc(252, GFP_ATOMIC | ((scmd->device->host->hostt->unchecked_isa_dma) ? __GFP_DMA : 0)); |
1da177e4 LT |
575 | |
576 | ||
577 | if (unlikely(!scsi_result)) { | |
578 | printk(KERN_ERR "%s: cannot allocate scsi_result.\n", | |
579 | __FUNCTION__); | |
580 | return FAILED; | |
581 | } | |
582 | ||
583 | /* | |
584 | * zero the sense buffer. some host adapters automatically always | |
585 | * request sense, so it is not a good idea that | |
586 | * scmd->request_buffer and scmd->sense_buffer point to the same | |
587 | * address (db). 0 is not a valid sense code. | |
588 | */ | |
589 | memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer)); | |
590 | memset(scsi_result, 0, 252); | |
591 | ||
592 | saved_result = scmd->result; | |
593 | scmd->request_buffer = scsi_result; | |
594 | scmd->request_bufflen = 252; | |
595 | scmd->use_sg = 0; | |
596 | scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); | |
597 | scmd->sc_data_direction = DMA_FROM_DEVICE; | |
598 | scmd->underflow = 0; | |
599 | ||
600 | rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT); | |
601 | ||
602 | /* last chance to have valid sense data */ | |
603 | if(!SCSI_SENSE_VALID(scmd)) { | |
604 | memcpy(scmd->sense_buffer, scmd->request_buffer, | |
605 | sizeof(scmd->sense_buffer)); | |
606 | } | |
607 | ||
608 | kfree(scsi_result); | |
609 | ||
610 | /* | |
611 | * when we eventually call scsi_finish, we really wish to complete | |
612 | * the original request, so let's restore the original data. (db) | |
613 | */ | |
614 | scsi_setup_cmd_retry(scmd); | |
615 | scmd->result = saved_result; | |
616 | return rtn; | |
617 | } | |
618 | ||
619 | /** | |
620 | * scsi_eh_finish_cmd - Handle a cmd that eh is finished with. | |
621 | * @scmd: Original SCSI cmd that eh has finished. | |
622 | * @done_q: Queue for processed commands. | |
623 | * | |
624 | * Notes: | |
625 | * We don't want to use the normal command completion while we are are | |
626 | * still handling errors - it may cause other commands to be queued, | |
627 | * and that would disturb what we are doing. thus we really want to | |
628 | * keep a list of pending commands for final completion, and once we | |
629 | * are ready to leave error handling we handle completion for real. | |
630 | **/ | |
631 | static void scsi_eh_finish_cmd(struct scsi_cmnd *scmd, | |
632 | struct list_head *done_q) | |
633 | { | |
634 | scmd->device->host->host_failed--; | |
3111b0d1 | 635 | scmd->eh_eflags = 0; |
1da177e4 LT |
636 | |
637 | /* | |
638 | * set this back so that the upper level can correctly free up | |
639 | * things. | |
640 | */ | |
641 | scsi_setup_cmd_retry(scmd); | |
642 | list_move_tail(&scmd->eh_entry, done_q); | |
643 | } | |
644 | ||
/**
 * scsi_eh_get_sense - Get device sense data.
 * @work_q:	Queue of commands to process.
 * @done_q:	Queue of processed commands.
 *
 * Description:
 *    See if we need to request sense information.  if so, then get it
 *    now, so we have a better idea of what to do.
 *
 * Notes:
 *    This has the unfortunate side effect that if a shost adapter does
 *    not automatically request sense information, that we end up shutting
 *    it down before we request it.
 *
 *    All drivers should request sense information internally these days,
 *    so for now all I have to say is tough noogies if you end up in here.
 *
 *    XXX: Long term this code should go away, but that needs an audit of
 *         all LLDDs first.
 *
 * Return value:
 *    nonzero if work_q has been fully drained into done_q.
 **/
static int scsi_eh_get_sense(struct list_head *work_q,
			     struct list_head *done_q)
{
	/* _safe iteration: scsi_eh_finish_cmd moves entries off work_q */
	struct scsi_cmnd *scmd, *next;
	int rtn;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		/* skip timed-out commands and those that already have sense */
		if ((scmd->eh_eflags & SCSI_EH_CANCEL_CMD) ||
		    SCSI_SENSE_VALID(scmd))
			continue;

		SCSI_LOG_ERROR_RECOVERY(2, scmd_printk(KERN_INFO, scmd,
						  "%s: requesting sense\n",
						  current->comm));
		rtn = scsi_request_sense(scmd);
		if (rtn != SUCCESS)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("sense requested for %p"
						  " result %x\n", scmd,
						  scmd->result));
		SCSI_LOG_ERROR_RECOVERY(3, scsi_print_sense("bh", scmd));

		rtn = scsi_decide_disposition(scmd);

		/*
		 * if the result was normal, then just pass it along to the
		 * upper level.
		 */
		if (rtn == SUCCESS)
			/* we don't want this command reissued, just
			 * finished with the sense data, so set
			 * retries to the max allowed to ensure it
			 * won't get reissued */
			scmd->retries = scmd->allowed;
		else if (rtn != NEEDS_RETRY)
			continue;

		scsi_eh_finish_cmd(scmd, done_q);
	}

	return list_empty(work_q);
}
708 | ||
709 | /** | |
710 | * scsi_try_to_abort_cmd - Ask host to abort a running command. | |
711 | * @scmd: SCSI cmd to abort from Lower Level. | |
712 | * | |
713 | * Notes: | |
714 | * This function will not return until the user's completion function | |
715 | * has been called. there is no timeout on this operation. if the | |
716 | * author of the low-level driver wishes this operation to be timed, | |
717 | * they can provide this facility themselves. helper functions in | |
718 | * scsi_error.c can be supplied to make this easier to do. | |
719 | **/ | |
720 | static int scsi_try_to_abort_cmd(struct scsi_cmnd *scmd) | |
721 | { | |
1da177e4 | 722 | if (!scmd->device->host->hostt->eh_abort_handler) |
8fa728a2 | 723 | return FAILED; |
1da177e4 LT |
724 | |
725 | /* | |
726 | * scsi_done was called just after the command timed out and before | |
727 | * we had a chance to process it. (db) | |
728 | */ | |
729 | if (scmd->serial_number == 0) | |
730 | return SUCCESS; | |
8fa728a2 | 731 | return scmd->device->host->hostt->eh_abort_handler(scmd); |
1da177e4 LT |
732 | } |
733 | ||
/**
 * scsi_eh_tur - Send TUR to device.
 * @scmd:	Scsi cmd to send TUR
 *
 * Description:
 *    Issues TEST UNIT READY via scsi_send_eh_cmnd, retrying once if the
 *    disposition is NEEDS_RETRY.  The command's result is saved and
 *    restored so the original command's outcome is not clobbered.
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 **/
static int scsi_eh_tur(struct scsi_cmnd *scmd)
{
	static unsigned char tur_command[6] = {TEST_UNIT_READY, 0, 0, 0, 0, 0};
	int retry_cnt = 1, rtn;
	int saved_result;

retry_tur:
	memcpy(scmd->cmnd, tur_command, sizeof(tur_command));

	/*
	 * zero the sense buffer.  the scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));

	/* TUR carries no data; clear the transfer-related fields */
	saved_result = scmd->result;
	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->underflow = 0;
	scmd->sc_data_direction = DMA_NONE;

	rtn = scsi_send_eh_cmnd(scmd, SENSE_TIMEOUT);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);
	scmd->result = saved_result;

	/*
	 * hey, we are done.  let's look to see what happened.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
		__FUNCTION__, scmd, rtn));
	if (rtn == SUCCESS)
		return 0;
	else if (rtn == NEEDS_RETRY) {
		/* retry at most once, then report ready anyway */
		if (retry_cnt--)
			goto retry_tur;
		return 0;
	}
	return 1;
}
787 | ||
/**
 * scsi_eh_abort_cmds - abort canceled commands.
 * @work_q:	Queue of commands to process.
 * @done_q:	list_head for processed commands.
 *
 * Description:
 *    Try and see whether or not it makes sense to try and abort the
 *    running command.  this only works out to be the case if we have one
 *    command that has timed out.  if the command simply failed, it makes
 *    no sense to try and abort the command, since as far as the shost
 *    adapter is concerned, it isn't running.
 *
 * Return value:
 *    nonzero if work_q has been fully drained into done_q.
 **/
static int scsi_eh_abort_cmds(struct list_head *work_q,
			      struct list_head *done_q)
{
	/* _safe iteration: scsi_eh_finish_cmd moves entries off work_q */
	struct scsi_cmnd *scmd, *next;
	int rtn;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		/* only timed-out commands are candidates for abort */
		if (!(scmd->eh_eflags & SCSI_EH_CANCEL_CMD))
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting cmd:"
						  "0x%p\n", current->comm,
						  scmd));
		rtn = scsi_try_to_abort_cmd(scmd);
		if (rtn == SUCCESS) {
			scmd->eh_eflags &= ~SCSI_EH_CANCEL_CMD;
			/*
			 * a TUR failure (or an offline device) means the
			 * device needs no further probing; finish the cmd.
			 */
			if (!scsi_device_online(scmd->device) ||
			    !scsi_eh_tur(scmd)) {
				scsi_eh_finish_cmd(scmd, done_q);
			}

		} else
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: aborting"
							  " cmd failed:"
							  "0x%p\n",
							  current->comm,
							  scmd));
	}

	return list_empty(work_q);
}
830 | ||
831 | /** | |
832 | * scsi_try_bus_device_reset - Ask host to perform a BDR on a dev | |
833 | * @scmd: SCSI cmd used to send BDR | |
834 | * | |
835 | * Notes: | |
836 | * There is no timeout for this operation. if this operation is | |
837 | * unreliable for a given host, then the host itself needs to put a | |
838 | * timer on it, and set the host back to a consistent state prior to | |
839 | * returning. | |
840 | **/ | |
841 | static int scsi_try_bus_device_reset(struct scsi_cmnd *scmd) | |
842 | { | |
94d0e7b8 | 843 | int rtn; |
1da177e4 LT |
844 | |
845 | if (!scmd->device->host->hostt->eh_device_reset_handler) | |
94d0e7b8 | 846 | return FAILED; |
1da177e4 | 847 | |
1da177e4 | 848 | rtn = scmd->device->host->hostt->eh_device_reset_handler(scmd); |
1da177e4 LT |
849 | if (rtn == SUCCESS) { |
850 | scmd->device->was_reset = 1; | |
851 | scmd->device->expecting_cc_ua = 1; | |
852 | } | |
853 | ||
854 | return rtn; | |
855 | } | |
856 | ||
/**
 * scsi_eh_try_stu - Send START_UNIT to device.
 * @scmd:	Scsi cmd to send START_UNIT
 *
 * Description:
 *    Issues START STOP UNIT (start bit set) via scsi_send_eh_cmnd if
 *    the device allows restarts.  The command's result is saved and
 *    restored so the original command's outcome is not clobbered.
 *
 * Return value:
 *    0 - Device is ready. 1 - Device NOT ready.
 **/
static int scsi_eh_try_stu(struct scsi_cmnd *scmd)
{
	static unsigned char stu_command[6] = {START_STOP, 0, 0, 0, 1, 0};
	int rtn;
	int saved_result;

	/* only devices configured for restart get a START UNIT */
	if (!scmd->device->allow_restart)
		return 1;

	memcpy(scmd->cmnd, stu_command, sizeof(stu_command));

	/*
	 * zero the sense buffer.  the scsi spec mandates that any
	 * untransferred sense data should be interpreted as being zero.
	 */
	memset(scmd->sense_buffer, 0, sizeof(scmd->sense_buffer));

	/* START UNIT carries no data; clear the transfer-related fields */
	saved_result = scmd->result;
	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;
	scmd->use_sg = 0;
	scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
	scmd->underflow = 0;
	scmd->sc_data_direction = DMA_NONE;

	rtn = scsi_send_eh_cmnd(scmd, START_UNIT_TIMEOUT);

	/*
	 * when we eventually call scsi_finish, we really wish to complete
	 * the original request, so let's restore the original data. (db)
	 */
	scsi_setup_cmd_retry(scmd);
	scmd->result = saved_result;

	/*
	 * hey, we are done.  let's look to see what happened.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: scmd %p rtn %x\n",
		__FUNCTION__, scmd, rtn));
	if (rtn == SUCCESS)
		return 0;
	return 1;
}
907 | ||
/**
 * scsi_eh_stu - send START_UNIT if needed
 * @shost: scsi host being recovered.
 * @work_q: list_head of failed commands still awaiting recovery.
 * @done_q: list_head for processed commands.
 *
 * Notes:
 *    If commands are failing due to not ready, initializing command required,
 *    try revalidating the device, which will end up sending a start unit.
 *
 * Return value:
 *    nonzero if @work_q was drained completely.
 **/
static int scsi_eh_stu(struct Scsi_Host *shost,
			      struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *stu_scmd, *next;
	struct scsi_device *sdev;

	shost_for_each_device(sdev, shost) {
		/*
		 * Pick one failed command on this device whose sense data
		 * could not be resolved; it carries the START_UNIT attempt.
		 */
		stu_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev && SCSI_SENSE_VALID(scmd) &&
			    scsi_check_sense(scmd) == FAILED ) {
				stu_scmd = scmd;
				break;
			}

		if (!stu_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending START_UNIT to sdev:"
						  " 0x%p\n", current->comm, sdev));

		if (!scsi_eh_try_stu(stu_scmd)) {
			/*
			 * Device recovered (or is beyond help because it went
			 * offline): finish all of its queued commands.
			 */
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(stu_scmd)) {
				list_for_each_entry_safe(scmd, next,
							  work_q, eh_entry) {
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd, done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3,
						printk("%s: START_UNIT failed to sdev:"
						       " 0x%p\n", current->comm, sdev));
		}
	}

	return list_empty(work_q);
}
957 | ||
958 | ||
/**
 * scsi_eh_bus_device_reset - send bdr if needed
 * @shost: scsi host being recovered.
 * @work_q: list_head of failed commands still awaiting recovery.
 * @done_q: list_head for processed commands.
 *
 * Notes:
 *    Try a bus device reset.  still, look to see whether we have multiple
 *    devices that are jammed or not - if we have multiple devices, it
 *    makes no sense to try bus_device_reset - we really would need to try
 *    a bus_reset instead.
 *
 * Return value:
 *    nonzero if @work_q was drained completely.
 **/
static int scsi_eh_bus_device_reset(struct Scsi_Host *shost,
				    struct list_head *work_q,
				    struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *bdr_scmd, *next;
	struct scsi_device *sdev;
	int rtn;

	shost_for_each_device(sdev, shost) {
		/* Any one failed command on this device can carry the BDR. */
		bdr_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry)
			if (scmd->device == sdev) {
				bdr_scmd = scmd;
				break;
			}

		if (!bdr_scmd)
			continue;

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BDR sdev:"
						  " 0x%p\n", current->comm,
						  sdev));
		rtn = scsi_try_bus_device_reset(bdr_scmd);
		if (rtn == SUCCESS) {
			/*
			 * Device answers TEST UNIT READY (or is offline):
			 * finish all of its commands off the work queue.
			 */
			if (!scsi_device_online(sdev) ||
			    !scsi_eh_tur(bdr_scmd)) {
				list_for_each_entry_safe(scmd, next,
							 work_q, eh_entry) {
					if (scmd->device == sdev)
						scsi_eh_finish_cmd(scmd,
								   done_q);
				}
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BDR"
							  " failed sdev:"
							  "0x%p\n",
							  current->comm,
							  sdev));
		}
	}

	return list_empty(work_q);
}
1014 | ||
/**
 * scsi_try_bus_reset - ask host to perform a bus reset
 * @scmd: SCSI cmd to send bus reset.
 *
 * Returns the driver handler's SUCCESS/FAILED verdict, or FAILED when
 * the driver provides no eh_bus_reset_handler.
 **/
static int scsi_try_bus_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;

	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Bus RST\n",
					  __FUNCTION__));

	if (!scmd->device->host->hostt->eh_bus_reset_handler)
		return FAILED;

	rtn = scmd->device->host->hostt->eh_bus_reset_handler(scmd);

	if (rtn == SUCCESS) {
		/* Let devices settle after the reset unless the driver opts out. */
		if (!scmd->device->host->hostt->skip_settle_delay)
			ssleep(BUS_RESET_SETTLE_TIME);
		/* scsi_report_bus_reset() requires the host lock to be held. */
		spin_lock_irqsave(scmd->device->host->host_lock, flags);
		scsi_report_bus_reset(scmd->device->host, scmd->device->channel);
		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
	}

	return rtn;
}
1042 | ||
/**
 * scsi_try_host_reset - ask host adapter to reset itself
 * @scmd: SCSI cmd to send host reset.
 *
 * Returns the driver handler's SUCCESS/FAILED verdict, or FAILED when
 * the driver provides no eh_host_reset_handler.
 **/
static int scsi_try_host_reset(struct scsi_cmnd *scmd)
{
	unsigned long flags;
	int rtn;

	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Snd Host RST\n",
					  __FUNCTION__));

	if (!scmd->device->host->hostt->eh_host_reset_handler)
		return FAILED;

	rtn = scmd->device->host->hostt->eh_host_reset_handler(scmd);

	if (rtn == SUCCESS) {
		/* Let devices settle after the reset unless the driver opts out. */
		if (!scmd->device->host->hostt->skip_settle_delay)
			ssleep(HOST_RESET_SETTLE_TIME);
		/*
		 * NOTE(review): only the issuing device's channel is marked
		 * reset, though a host reset presumably affects every
		 * channel on the adapter - confirm whether all channels
		 * should be reported here.
		 */
		spin_lock_irqsave(scmd->device->host->host_lock, flags);
		scsi_report_bus_reset(scmd->device->host, scmd->device->channel);
		spin_unlock_irqrestore(scmd->device->host->host_lock, flags);
	}

	return rtn;
}
1070 | ||
/**
 * scsi_eh_bus_reset - send a bus reset
 * @shost: scsi host being recovered.
 * @work_q: list_head of failed commands still awaiting recovery.
 * @done_q: list_head for processed commands.
 *
 * Return value:
 *    nonzero if @work_q was drained completely.
 **/
static int scsi_eh_bus_reset(struct Scsi_Host *shost,
			     struct list_head *work_q,
			     struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *chan_scmd, *next;
	unsigned int channel;
	int rtn;

	/*
	 * we really want to loop over the various channels, and do this on
	 * a channel by channel basis.  we should also check to see if any
	 * of the failed commands are on soft_reset devices, and if so, skip
	 * the reset.
	 */

	for (channel = 0; channel <= shost->max_channel; channel++) {
		/* Any failed command on this channel can carry the reset. */
		chan_scmd = NULL;
		list_for_each_entry(scmd, work_q, eh_entry) {
			if (channel == scmd->device->channel) {
				chan_scmd = scmd;
				break;
				/*
				 * FIXME add back in some support for
				 * soft_reset devices.
				 */
			}
		}

		if (!chan_scmd)
			continue;
		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending BRST chan:"
						  " %d\n", current->comm,
						  channel));
		rtn = scsi_try_bus_reset(chan_scmd);
		if (rtn == SUCCESS) {
			/*
			 * Finish every command on this channel whose device
			 * is now offline or answers TEST UNIT READY.
			 */
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				if (channel == scmd->device->channel)
					if (!scsi_device_online(scmd->device) ||
					    !scsi_eh_tur(scmd))
						scsi_eh_finish_cmd(scmd,
								   done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: BRST"
							  " failed chan: %d\n",
							  current->comm,
							  channel));
		}
	}
	return list_empty(work_q);
}
1127 | ||
/**
 * scsi_eh_host_reset - send a host reset
 * @work_q: list_head of failed commands still awaiting recovery.
 * @done_q: list_head for processed commands.
 *
 * Return value:
 *    nonzero if @work_q was drained completely.
 **/
static int scsi_eh_host_reset(struct list_head *work_q,
			      struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;
	int rtn;

	if (!list_empty(work_q)) {
		/* Any failed command will do as the vehicle for the reset. */
		scmd = list_entry(work_q->next,
				  struct scsi_cmnd, eh_entry);

		SCSI_LOG_ERROR_RECOVERY(3, printk("%s: Sending HRST\n"
						  , current->comm));

		rtn = scsi_try_host_reset(scmd);
		if (rtn == SUCCESS) {
			list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
				/*
				 * NOTE(review): the trailing !scsi_eh_tur()
				 * term looks redundant - when the STU/TUR
				 * pair just failed, TUR is retried once
				 * more immediately.  Left untouched since
				 * the retry may be intentional; confirm
				 * before simplifying.
				 */
				if (!scsi_device_online(scmd->device) ||
				    (!scsi_eh_try_stu(scmd) && !scsi_eh_tur(scmd)) ||
				    !scsi_eh_tur(scmd))
					scsi_eh_finish_cmd(scmd, done_q);
			}
		} else {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: HRST"
							  " failed\n",
							  current->comm));
		}
	}
	return list_empty(work_q);
}
1162 | ||
/**
 * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
 * @work_q: list_head of commands every recovery step failed to rescue.
 * @done_q: list_head for processed commands.
 *
 * Last resort after START_UNIT, device/bus/host resets all failed: each
 * remaining command's device is set SDEV_OFFLINE and the command is
 * finished.
 **/
static void scsi_eh_offline_sdevs(struct list_head *work_q,
				  struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, work_q, eh_entry) {
		sdev_printk(KERN_INFO, scmd->device,
			    "scsi: Device offlined - not"
			    " ready after error recovery\n");
		scsi_device_set_state(scmd->device, SDEV_OFFLINE);
		if (scmd->eh_eflags & SCSI_EH_CANCEL_CMD) {
			/*
			 * FIXME: Handle lost cmds.
			 */
		}
		scsi_eh_finish_cmd(scmd, done_q);
	}
	return;
}
1188 | ||
/**
 * scsi_decide_disposition - Disposition a cmd on return from LLD.
 * @scmd: SCSI cmd to examine.
 *
 * Notes:
 *    This is *only* called when we are examining the status after sending
 *    out the actual data command.  any commands that are queued for error
 *    recovery (e.g. test_unit_ready) do *not* come through here.
 *
 *    When this routine returns failed, it means the error handler thread
 *    is woken.  In cases where the error code indicates an error that
 *    doesn't require the error handler read (i.e. we don't need to
 *    abort/reset), this function should return SUCCESS.
 *
 * Return value:
 *    SUCCESS, FAILED, NEEDS_RETRY or ADD_TO_MLQUEUE.
 **/
int scsi_decide_disposition(struct scsi_cmnd *scmd)
{
	int rtn;

	/*
	 * if the device is offline, then we clearly just pass the result back
	 * up to the top level.
	 */
	if (!scsi_device_online(scmd->device)) {
		SCSI_LOG_ERROR_RECOVERY(5, printk("%s: device offline - report"
						  " as SUCCESS\n",
						  __FUNCTION__));
		return SUCCESS;
	}

	/*
	 * first check the host byte, to see if there is anything in there
	 * that would indicate what we need to do.
	 */
	switch (host_byte(scmd->result)) {
	case DID_PASSTHROUGH:
		/*
		 * no matter what, pass this through to the upper layer.
		 * nuke this special code so that it looks like we are saying
		 * did_ok.
		 */
		scmd->result &= 0xff00ffff;
		return SUCCESS;
	case DID_OK:
		/*
		 * looks good.  drop through, and check the next byte.
		 */
		break;
	case DID_NO_CONNECT:
	case DID_BAD_TARGET:
	case DID_ABORT:
		/*
		 * note - this means that we just report the status back
		 * to the top level driver, not that we actually think
		 * that it indicates SUCCESS.
		 */
		return SUCCESS;
		/*
		 * when the low level driver returns did_soft_error,
		 * it is responsible for keeping an internal retry counter
		 * in order to avoid endless loops (db)
		 *
		 * actually this is a bug in this function here.  we should
		 * be mindful of the maximum number of retries specified
		 * and not get stuck in a loop.
		 */
	case DID_SOFT_ERROR:
		goto maybe_retry;
	case DID_IMM_RETRY:
		return NEEDS_RETRY;

	case DID_REQUEUE:
		return ADD_TO_MLQUEUE;

	case DID_ERROR:
		if (msg_byte(scmd->result) == COMMAND_COMPLETE &&
		    status_byte(scmd->result) == RESERVATION_CONFLICT)
			/*
			 * execute reservation conflict processing code
			 * lower down
			 */
			break;
		/* fallthrough */

	case DID_BUS_BUSY:
	case DID_PARITY:
		goto maybe_retry;
	case DID_TIME_OUT:
		/*
		 * when we scan the bus, we get timeout messages for
		 * these commands if there is no device available.
		 * other hosts report did_no_connect for the same thing.
		 */
		if ((scmd->cmnd[0] == TEST_UNIT_READY ||
		     scmd->cmnd[0] == INQUIRY)) {
			return SUCCESS;
		} else {
			return FAILED;
		}
	case DID_RESET:
		return SUCCESS;
	default:
		return FAILED;
	}

	/*
	 * next, check the message byte.
	 */
	if (msg_byte(scmd->result) != COMMAND_COMPLETE)
		return FAILED;

	/*
	 * check the status byte to see if this indicates anything special.
	 */
	switch (status_byte(scmd->result)) {
	case QUEUE_FULL:
		/*
		 * the case of trying to send too many commands to a
		 * tagged queueing device.
		 */
	case BUSY:
		/*
		 * device can't talk to us at the moment.  Should only
		 * occur (SAM-3) when the task queue is empty, so will cause
		 * the empty queue handling to trigger a stall in the
		 * device.
		 */
		return ADD_TO_MLQUEUE;
	case GOOD:
	case COMMAND_TERMINATED:
	case TASK_ABORTED:
		return SUCCESS;
	case CHECK_CONDITION:
		rtn = scsi_check_sense(scmd);
		if (rtn == NEEDS_RETRY)
			goto maybe_retry;
		/* if rtn == FAILED, we have no sense information;
		 * returning FAILED will wake the error handler thread
		 * to collect the sense and redo the decide
		 * disposition */
		return rtn;
	case CONDITION_GOOD:
	case INTERMEDIATE_GOOD:
	case INTERMEDIATE_C_GOOD:
	case ACA_ACTIVE:
		/*
		 * who knows?  FIXME(eric)
		 */
		return SUCCESS;

	case RESERVATION_CONFLICT:
		sdev_printk(KERN_INFO, scmd->device,
			    "reservation conflict\n");
		return SUCCESS; /* causes immediate i/o error */
	default:
		return FAILED;
	}
	return FAILED;

 maybe_retry:

	/* we requeue for retry because the error was retryable, and
	 * the request was not marked fast fail.  Note that above,
	 * even if the request is marked fast fail, we still requeue
	 * for queue congestion conditions (QUEUE_FULL or BUSY) */
	if ((++scmd->retries) < scmd->allowed
	    && !blk_noretry_request(scmd->request)) {
		return NEEDS_RETRY;
	} else {
		/*
		 * no more retries - report this one back to upper level.
		 */
		return SUCCESS;
	}
}
1363 | ||
1364 | /** | |
1365 | * scsi_eh_lock_done - done function for eh door lock request | |
1366 | * @scmd: SCSI command block for the door lock request | |
1367 | * | |
1368 | * Notes: | |
1369 | * We completed the asynchronous door lock request, and it has either | |
1370 | * locked the door or failed. We must free the command structures | |
1371 | * associated with this request. | |
1372 | **/ | |
1373 | static void scsi_eh_lock_done(struct scsi_cmnd *scmd) | |
1374 | { | |
1375 | struct scsi_request *sreq = scmd->sc_request; | |
1376 | ||
1377 | scsi_release_request(sreq); | |
1378 | } | |
1379 | ||
1380 | ||
/**
 * scsi_eh_lock_door - Prevent medium removal for the specified device
 * @sdev: SCSI device to prevent medium removal
 *
 * Locking:
 *    We must be called from process context; scsi_allocate_request()
 *    may sleep.
 *
 * Notes:
 *    We queue up an asynchronous "ALLOW MEDIUM REMOVAL" request on the
 *    head of the devices request queue, and continue.
 *
 * Bugs:
 *    scsi_allocate_request() may sleep waiting for existing requests to
 *    be processed.  However, since we haven't kicked off any request
 *    processing for this host, this may deadlock.
 *
 *    If scsi_allocate_request() fails for what ever reason, we
 *    completely forget to lock the door.
 **/
static void scsi_eh_lock_door(struct scsi_device *sdev)
{
	struct scsi_request *sreq = scsi_allocate_request(sdev, GFP_KERNEL);

	if (unlikely(!sreq)) {
		printk(KERN_ERR "%s: request allocate failed,"
		       "prevent media removal cmd not sent\n", __FUNCTION__);
		return;
	}

	/* Hand-build the 6-byte PREVENT/ALLOW MEDIUM REMOVAL CDB. */
	sreq->sr_cmnd[0] = ALLOW_MEDIUM_REMOVAL;
	sreq->sr_cmnd[1] = 0;
	sreq->sr_cmnd[2] = 0;
	sreq->sr_cmnd[3] = 0;
	sreq->sr_cmnd[4] = SCSI_REMOVAL_PREVENT;
	sreq->sr_cmnd[5] = 0;
	sreq->sr_data_direction = DMA_NONE;
	sreq->sr_bufflen = 0;
	sreq->sr_buffer = NULL;
	sreq->sr_allowed = 5;
	/* Completion merely frees the request - see scsi_eh_lock_done(). */
	sreq->sr_done = scsi_eh_lock_done;
	sreq->sr_timeout_per_command = 10 * HZ;
	sreq->sr_cmd_len = COMMAND_SIZE(sreq->sr_cmnd[0]);

	/* Insert at the head of the queue so it precedes normal I/O. */
	scsi_insert_special_req(sreq, 1);
}
1427 | ||
1428 | ||
/**
 * scsi_restart_operations - restart io operations to the specified host.
 * @shost: Host we are restarting.
 *
 * Notes:
 *    When we entered the error handler, we blocked all further i/o to
 *    this device.  we need to 'reverse' this process.
 **/
static void scsi_restart_operations(struct Scsi_Host *shost)
{
	struct scsi_device *sdev;
	unsigned long flags;

	/*
	 * If the door was locked, we need to insert a door lock request
	 * onto the head of the SCSI request queue for the device.  There
	 * is no point trying to lock the door of an off-line device.
	 */
	shost_for_each_device(sdev, shost) {
		if (scsi_device_online(sdev) && sdev->locked)
			scsi_eh_lock_door(sdev);
	}

	/*
	 * next free up anything directly waiting upon the host.  this
	 * will be requests for character device operations, and also for
	 * ioctls to queued block devices.
	 */
	SCSI_LOG_ERROR_RECOVERY(3, printk("%s: waking up host to restart\n",
					  __FUNCTION__));

	/*
	 * NOTE(review): assumes scsi_host_set_state() returns nonzero on
	 * an illegal transition - fall back to CANCEL, then DEL, if
	 * RUNNING is not reachable from the current state.  Confirm the
	 * return convention against scsi_host_set_state().
	 */
	spin_lock_irqsave(shost->host_lock, flags);
	if (scsi_host_set_state(shost, SHOST_RUNNING))
		if (scsi_host_set_state(shost, SHOST_CANCEL))
			BUG_ON(scsi_host_set_state(shost, SHOST_DEL));
	spin_unlock_irqrestore(shost->host_lock, flags);

	wake_up(&shost->host_wait);

	/*
	 * finally we need to re-initiate requests that may be pending.  we will
	 * have had everything blocked while error handling is taking place, and
	 * now that error recovery is done, we will need to ensure that these
	 * requests are started.
	 */
	scsi_run_host_queues(shost);
}
1476 | ||
/**
 * scsi_eh_ready_devs - check device ready state and recover if not.
 * @shost: host to be recovered.
 * @work_q: list_head of commands still needing recovery.
 * @done_q: list_head for processed commands.
 *
 * Escalates through the recovery actions - START_UNIT, bus device
 * reset, bus reset, host reset - stopping as soon as one of them
 * empties @work_q.  Devices that survive every step are taken offline.
 **/
static void scsi_eh_ready_devs(struct Scsi_Host *shost,
			       struct list_head *work_q,
			       struct list_head *done_q)
{
	if (scsi_eh_stu(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_device_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_bus_reset(shost, work_q, done_q))
		return;
	if (scsi_eh_host_reset(work_q, done_q))
		return;
	scsi_eh_offline_sdevs(work_q, done_q);
}
1493 | ||
/**
 * scsi_eh_flush_done_q - finish processed commands or retry them.
 * @done_q: list_head of processed commands.
 *
 * Each recovered command is either requeued for another attempt (device
 * online, retries remaining, request not marked no-retry) or completed
 * back to the upper layers.
 **/
static void scsi_eh_flush_done_q(struct list_head *done_q)
{
	struct scsi_cmnd *scmd, *next;

	list_for_each_entry_safe(scmd, next, done_q, eh_entry) {
		list_del_init(&scmd->eh_entry);
		if (scsi_device_online(scmd->device) &&
		    !blk_noretry_request(scmd->request) &&
		    (++scmd->retries < scmd->allowed)) {
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush"
							  " retry cmd: %p\n",
							  current->comm,
							  scmd));
			scsi_queue_insert(scmd, SCSI_MLQUEUE_EH_RETRY);
		} else {
			/*
			 * If just we got sense for the device (called
			 * scsi_eh_get_sense), scmd->result is already
			 * set, do not set DRIVER_TIMEOUT.
			 */
			if (!scmd->result)
				scmd->result |= (DRIVER_TIMEOUT << 24);
			SCSI_LOG_ERROR_RECOVERY(3, printk("%s: flush finish"
							  " cmd: %p\n",
							  current->comm, scmd));
			scsi_finish_command(scmd);
		}
	}
}
1528 | ||
/**
 * scsi_unjam_host - Attempt to fix a host which has a cmd that failed.
 * @shost: Host to unjam.
 *
 * Notes:
 *    When we come in here, we *know* that all commands on the bus have
 *    either completed, failed or timed out.  we also know that no further
 *    commands are being sent to the host, so things are relatively quiet
 *    and we have freedom to fiddle with things as we wish.
 *
 *    This is only the *default* implementation.  it is possible for
 *    individual drivers to supply their own version of this function, and
 *    if the maintainer wishes to do this, it is strongly suggested that
 *    this function be taken as a template and modified.  this function
 *    was designed to correctly handle problems for about 95% of the
 *    different cases out there, and it should always provide at least a
 *    reasonable amount of error recovery.
 *
 *    Any command marked 'failed' or 'timeout' must eventually have
 *    scsi_finish_cmd() called for it.  we do all of the retry stuff
 *    here, so when we restart the host after we return it should have an
 *    empty queue.
 **/
static void scsi_unjam_host(struct Scsi_Host *shost)
{
	unsigned long flags;
	LIST_HEAD(eh_work_q);
	LIST_HEAD(eh_done_q);

	/* Steal the entire failed-command list under the host lock. */
	spin_lock_irqsave(shost->host_lock, flags);
	list_splice_init(&shost->eh_cmd_q, &eh_work_q);
	spin_unlock_irqrestore(shost->host_lock, flags);

	SCSI_LOG_ERROR_RECOVERY(1, scsi_eh_prt_fail_stats(shost, &eh_work_q));

	/* Escalate: collect sense, then abort, then reset-based recovery. */
	if (!scsi_eh_get_sense(&eh_work_q, &eh_done_q))
		if (!scsi_eh_abort_cmds(&eh_work_q, &eh_done_q))
			scsi_eh_ready_devs(shost, &eh_work_q, &eh_done_q);

	scsi_eh_flush_done_q(&eh_done_q);
}
1570 | ||
/**
 * scsi_error_handler - Handle errors/timeouts of SCSI cmds.
 * @data: Host for which we are running.
 *
 * Notes:
 *    This is always run in the context of a kernel thread.  The idea is
 *    that we start this thing up when the kernel starts up (one per host
 *    that we detect), and it immediately goes to sleep and waits for some
 *    event (i.e. failure).  When this takes place, we have the job of
 *    trying to unjam the bus and restarting things.
 **/
int scsi_error_handler(void *data)
{
	struct Scsi_Host *shost = (struct Scsi_Host *) data;
	int rtn;

	/* The error handler thread must never be frozen for suspend. */
	current->flags |= PF_NOFREEZE;


	/*
	 * Note - we always use TASK_INTERRUPTIBLE even if the module
	 * was loaded as part of the kernel.  The reason is that
	 * UNINTERRUPTIBLE would cause this thread to be counted in
	 * the load average as a running process, and an interruptible
	 * wait doesn't.
	 */
	set_current_state(TASK_INTERRUPTIBLE);
	while (!kthread_should_stop()) {
		/*
		 * Sleep until every outstanding command on the host has
		 * failed (host_failed == host_busy != 0); scsi_eh_wakeup()
		 * wakes us when that condition holds.
		 */
		if (shost->host_failed == 0 ||
		    shost->host_failed != shost->host_busy) {
			SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
							  " scsi_eh_%d"
							  " sleeping\n",
							  shost->host_no));
			schedule();
			set_current_state(TASK_INTERRUPTIBLE);
			continue;
		}

		__set_current_state(TASK_RUNNING);
		SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler"
						  " scsi_eh_%d waking"
						  " up\n",shost->host_no));

		shost->eh_active = 1;

		/*
		 * We have a host that is failing for some reason.  Figure out
		 * what we need to do to get it up and online again (if we can).
		 * If we fail, we end up taking the thing offline.
		 */
		if (shost->hostt->eh_strategy_handler)
			rtn = shost->hostt->eh_strategy_handler(shost);
		else
			scsi_unjam_host(shost);

		shost->eh_active = 0;

		/*
		 * Note - if the above fails completely, the action is to take
		 * individual devices offline and flush the queue of any
		 * outstanding requests that may have been pending.  When we
		 * restart, we restart any I/O to any other devices on the bus
		 * which are still online.
		 */
		scsi_restart_operations(shost);
		set_current_state(TASK_INTERRUPTIBLE);
	}

	__set_current_state(TASK_RUNNING);

	SCSI_LOG_ERROR_RECOVERY(1, printk("Error handler scsi_eh_%d"
					  " exiting\n",shost->host_no));

	/*
	 * Make sure that nobody tries to wake us up again.
	 */
	shost->ehandler = NULL;
	return 0;
}
1651 | ||
1652 | /* | |
1653 | * Function: scsi_report_bus_reset() | |
1654 | * | |
1655 | * Purpose: Utility function used by low-level drivers to report that | |
1656 | * they have observed a bus reset on the bus being handled. | |
1657 | * | |
1658 | * Arguments: shost - Host in question | |
1659 | * channel - channel on which reset was observed. | |
1660 | * | |
1661 | * Returns: Nothing | |
1662 | * | |
1663 | * Lock status: Host lock must be held. | |
1664 | * | |
1665 | * Notes: This only needs to be called if the reset is one which | |
1666 | * originates from an unknown location. Resets originated | |
1667 | * by the mid-level itself don't need to call this, but there | |
1668 | * should be no harm. | |
1669 | * | |
1670 | * The main purpose of this is to make sure that a CHECK_CONDITION | |
1671 | * is properly treated. | |
1672 | */ | |
1673 | void scsi_report_bus_reset(struct Scsi_Host *shost, int channel) | |
1674 | { | |
1675 | struct scsi_device *sdev; | |
1676 | ||
1677 | __shost_for_each_device(sdev, shost) { | |
1678 | if (channel == sdev->channel) { | |
1679 | sdev->was_reset = 1; | |
1680 | sdev->expecting_cc_ua = 1; | |
1681 | } | |
1682 | } | |
1683 | } | |
1684 | EXPORT_SYMBOL(scsi_report_bus_reset); | |
1685 | ||
1686 | /* | |
1687 | * Function: scsi_report_device_reset() | |
1688 | * | |
1689 | * Purpose: Utility function used by low-level drivers to report that | |
1690 | * they have observed a device reset on the device being handled. | |
1691 | * | |
1692 | * Arguments: shost - Host in question | |
1693 | * channel - channel on which reset was observed | |
1694 | * target - target on which reset was observed | |
1695 | * | |
1696 | * Returns: Nothing | |
1697 | * | |
1698 | * Lock status: Host lock must be held | |
1699 | * | |
1700 | * Notes: This only needs to be called if the reset is one which | |
1701 | * originates from an unknown location. Resets originated | |
1702 | * by the mid-level itself don't need to call this, but there | |
1703 | * should be no harm. | |
1704 | * | |
1705 | * The main purpose of this is to make sure that a CHECK_CONDITION | |
1706 | * is properly treated. | |
1707 | */ | |
1708 | void scsi_report_device_reset(struct Scsi_Host *shost, int channel, int target) | |
1709 | { | |
1710 | struct scsi_device *sdev; | |
1711 | ||
1712 | __shost_for_each_device(sdev, shost) { | |
1713 | if (channel == sdev->channel && | |
1714 | target == sdev->id) { | |
1715 | sdev->was_reset = 1; | |
1716 | sdev->expecting_cc_ua = 1; | |
1717 | } | |
1718 | } | |
1719 | } | |
1720 | EXPORT_SYMBOL(scsi_report_device_reset); | |
1721 | ||
/*
 * Dummy scsi_done callback for commands built by scsi_reset_provider():
 * the reset request carries no real I/O to complete, so completion is
 * deliberately a no-op.
 */
static void
scsi_reset_provider_done_command(struct scsi_cmnd *scmd)
{
}
1726 | ||
/*
 * Function:	scsi_reset_provider
 *
 * Purpose:	Send requested reset to a bus or device at any phase.
 *
 * Arguments:	device	- device to send reset to
 *		flag - reset type (see scsi.h)
 *
 * Returns:	SUCCESS/FAILURE.
 *
 * Notes:	This is used by the SCSI Generic driver to provide
 *		Bus/Device reset capability.
 */
int
scsi_reset_provider(struct scsi_device *dev, int flag)
{
	/*
	 * NOTE(review): scsi_get_command() result is not NULL-checked
	 * before being dereferenced - confirm it cannot fail with
	 * GFP_KERNEL here.
	 */
	struct scsi_cmnd *scmd = scsi_get_command(dev, GFP_KERNEL);
	struct request req;
	int rtn;

	/* Build a bare command that carries no data, only the reset. */
	scmd->request = &req;
	memset(&scmd->eh_timeout, 0, sizeof(scmd->eh_timeout));
	scmd->request->rq_status = RQ_SCSI_BUSY;

	memset(&scmd->cmnd, '\0', sizeof(scmd->cmnd));

	scmd->scsi_done = scsi_reset_provider_done_command;
	scmd->done = NULL;
	scmd->buffer = NULL;
	scmd->bufflen = 0;
	scmd->request_buffer = NULL;
	scmd->request_bufflen = 0;

	scmd->cmd_len = 0;

	scmd->sc_data_direction = DMA_BIDIRECTIONAL;
	scmd->sc_request = NULL;
	scmd->sc_magic = SCSI_CMND_MAGIC;

	init_timer(&scmd->eh_timeout);

	/*
	 * Sometimes the command can get back into the timer chain,
	 * so use the pid as an identifier.
	 */
	scmd->pid = 0;

	/*
	 * Escalate if the requested reset fails: device -> bus -> host.
	 */
	switch (flag) {
	case SCSI_TRY_RESET_DEVICE:
		rtn = scsi_try_bus_device_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_BUS:
		rtn = scsi_try_bus_reset(scmd);
		if (rtn == SUCCESS)
			break;
		/* FALLTHROUGH */
	case SCSI_TRY_RESET_HOST:
		rtn = scsi_try_host_reset(scmd);
		break;
	default:
		rtn = FAILED;
	}

	scsi_next_command(scmd);
	return rtn;
}
1796 | ||
1797 | /** | |
1798 | * scsi_normalize_sense - normalize main elements from either fixed or | |
1799 | * descriptor sense data format into a common format. | |
1800 | * | |
1801 | * @sense_buffer: byte array containing sense data returned by device | |
1802 | * @sb_len: number of valid bytes in sense_buffer | |
1803 | * @sshdr: pointer to instance of structure that common | |
1804 | * elements are written to. | |
1805 | * | |
1806 | * Notes: | |
1807 | * The "main elements" from sense data are: response_code, sense_key, | |
1808 | * asc, ascq and additional_length (only for descriptor format). | |
1809 | * | |
1810 | * Typically this function can be called after a device has | |
1811 | * responded to a SCSI command with the CHECK_CONDITION status. | |
1812 | * | |
1813 | * Return value: | |
1814 | * 1 if valid sense data information found, else 0; | |
1815 | **/ | |
1816 | int scsi_normalize_sense(const u8 *sense_buffer, int sb_len, | |
1817 | struct scsi_sense_hdr *sshdr) | |
1818 | { | |
33aa687d | 1819 | if (!sense_buffer || !sb_len) |
1da177e4 LT |
1820 | return 0; |
1821 | ||
1822 | memset(sshdr, 0, sizeof(struct scsi_sense_hdr)); | |
1823 | ||
1824 | sshdr->response_code = (sense_buffer[0] & 0x7f); | |
33aa687d JB |
1825 | |
1826 | if (!scsi_sense_valid(sshdr)) | |
1827 | return 0; | |
1828 | ||
1da177e4 LT |
1829 | if (sshdr->response_code >= 0x72) { |
1830 | /* | |
1831 | * descriptor format | |
1832 | */ | |
1833 | if (sb_len > 1) | |
1834 | sshdr->sense_key = (sense_buffer[1] & 0xf); | |
1835 | if (sb_len > 2) | |
1836 | sshdr->asc = sense_buffer[2]; | |
1837 | if (sb_len > 3) | |
1838 | sshdr->ascq = sense_buffer[3]; | |
1839 | if (sb_len > 7) | |
1840 | sshdr->additional_length = sense_buffer[7]; | |
1841 | } else { | |
1842 | /* | |
1843 | * fixed format | |
1844 | */ | |
1845 | if (sb_len > 2) | |
1846 | sshdr->sense_key = (sense_buffer[2] & 0xf); | |
1847 | if (sb_len > 7) { | |
1848 | sb_len = (sb_len < (sense_buffer[7] + 8)) ? | |
1849 | sb_len : (sense_buffer[7] + 8); | |
1850 | if (sb_len > 12) | |
1851 | sshdr->asc = sense_buffer[12]; | |
1852 | if (sb_len > 13) | |
1853 | sshdr->ascq = sense_buffer[13]; | |
1854 | } | |
1855 | } | |
1856 | ||
1857 | return 1; | |
1858 | } | |
1859 | EXPORT_SYMBOL(scsi_normalize_sense); | |
1860 | ||
1861 | int scsi_request_normalize_sense(struct scsi_request *sreq, | |
1862 | struct scsi_sense_hdr *sshdr) | |
1863 | { | |
1864 | return scsi_normalize_sense(sreq->sr_sense_buffer, | |
1865 | sizeof(sreq->sr_sense_buffer), sshdr); | |
1866 | } | |
1867 | EXPORT_SYMBOL(scsi_request_normalize_sense); | |
1868 | ||
1869 | int scsi_command_normalize_sense(struct scsi_cmnd *cmd, | |
1870 | struct scsi_sense_hdr *sshdr) | |
1871 | { | |
1872 | return scsi_normalize_sense(cmd->sense_buffer, | |
1873 | sizeof(cmd->sense_buffer), sshdr); | |
1874 | } | |
1875 | EXPORT_SYMBOL(scsi_command_normalize_sense); | |
1876 | ||
1877 | /** | |
1878 | * scsi_sense_desc_find - search for a given descriptor type in | |
1879 | * descriptor sense data format. | |
1880 | * | |
1881 | * @sense_buffer: byte array of descriptor format sense data | |
1882 | * @sb_len: number of valid bytes in sense_buffer | |
1883 | * @desc_type: value of descriptor type to find | |
1884 | * (e.g. 0 -> information) | |
1885 | * | |
1886 | * Notes: | |
1887 | * only valid when sense data is in descriptor format | |
1888 | * | |
1889 | * Return value: | |
1890 | * pointer to start of (first) descriptor if found else NULL | |
1891 | **/ | |
1892 | const u8 * scsi_sense_desc_find(const u8 * sense_buffer, int sb_len, | |
1893 | int desc_type) | |
1894 | { | |
1895 | int add_sen_len, add_len, desc_len, k; | |
1896 | const u8 * descp; | |
1897 | ||
1898 | if ((sb_len < 8) || (0 == (add_sen_len = sense_buffer[7]))) | |
1899 | return NULL; | |
1900 | if ((sense_buffer[0] < 0x72) || (sense_buffer[0] > 0x73)) | |
1901 | return NULL; | |
1902 | add_sen_len = (add_sen_len < (sb_len - 8)) ? | |
1903 | add_sen_len : (sb_len - 8); | |
1904 | descp = &sense_buffer[8]; | |
1905 | for (desc_len = 0, k = 0; k < add_sen_len; k += desc_len) { | |
1906 | descp += desc_len; | |
1907 | add_len = (k < (add_sen_len - 1)) ? descp[1]: -1; | |
1908 | desc_len = add_len + 2; | |
1909 | if (descp[0] == desc_type) | |
1910 | return descp; | |
1911 | if (add_len < 0) // short descriptor ?? | |
1912 | break; | |
1913 | } | |
1914 | return NULL; | |
1915 | } | |
1916 | EXPORT_SYMBOL(scsi_sense_desc_find); | |
1917 | ||
1918 | /** | |
1919 | * scsi_get_sense_info_fld - attempts to get information field from | |
1920 | * sense data (either fixed or descriptor format) | |
1921 | * | |
1922 | * @sense_buffer: byte array of sense data | |
1923 | * @sb_len: number of valid bytes in sense_buffer | |
1924 | * @info_out: pointer to 64 integer where 8 or 4 byte information | |
1925 | * field will be placed if found. | |
1926 | * | |
1927 | * Return value: | |
1928 | * 1 if information field found, 0 if not found. | |
1929 | **/ | |
1930 | int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len, | |
1931 | u64 * info_out) | |
1932 | { | |
1933 | int j; | |
1934 | const u8 * ucp; | |
1935 | u64 ull; | |
1936 | ||
1937 | if (sb_len < 7) | |
1938 | return 0; | |
1939 | switch (sense_buffer[0] & 0x7f) { | |
1940 | case 0x70: | |
1941 | case 0x71: | |
1942 | if (sense_buffer[0] & 0x80) { | |
1943 | *info_out = (sense_buffer[3] << 24) + | |
1944 | (sense_buffer[4] << 16) + | |
1945 | (sense_buffer[5] << 8) + sense_buffer[6]; | |
1946 | return 1; | |
1947 | } else | |
1948 | return 0; | |
1949 | case 0x72: | |
1950 | case 0x73: | |
1951 | ucp = scsi_sense_desc_find(sense_buffer, sb_len, | |
1952 | 0 /* info desc */); | |
1953 | if (ucp && (0xa == ucp[1])) { | |
1954 | ull = 0; | |
1955 | for (j = 0; j < 8; ++j) { | |
1956 | if (j > 0) | |
1957 | ull <<= 8; | |
1958 | ull |= ucp[4 + j]; | |
1959 | } | |
1960 | *info_out = ull; | |
1961 | return 1; | |
1962 | } else | |
1963 | return 0; | |
1964 | default: | |
1965 | return 0; | |
1966 | } | |
1967 | } | |
1968 | EXPORT_SYMBOL(scsi_get_sense_info_fld); |