/*
 *  libata-eh.c - libata error handling
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2006 Tejun Heo <htejun@gmail.com>
 *
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License as
 *  published by the Free Software Foundation; either version 2, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 *  USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/blkdev.h>
#include <linux/pci.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_dbg.h>
#include "../scsi/scsi_transport_api.h"

#include <linux/libata.h>

#include "libata.h"

enum {
	/* speed down verdicts */
	ATA_EH_SPDN_NCQ_OFF		= (1 << 0),
	ATA_EH_SPDN_SPEED_DOWN		= (1 << 1),
	ATA_EH_SPDN_FALLBACK_TO_PIO	= (1 << 2),
	ATA_EH_SPDN_KEEP_ERRORS		= (1 << 3),

	/* error flags */
	ATA_EFLAG_IS_IO			= (1 << 0),
	ATA_EFLAG_DUBIOUS_XFER		= (1 << 1),

	/* error categories */
	ATA_ECAT_NONE			= 0,
	ATA_ECAT_ATA_BUS		= 1,
	ATA_ECAT_TOUT_HSM		= 2,
	ATA_ECAT_UNK_DEV		= 3,
	ATA_ECAT_DUBIOUS_NONE		= 4,
	ATA_ECAT_DUBIOUS_ATA_BUS	= 5,
	ATA_ECAT_DUBIOUS_TOUT_HSM	= 6,
	ATA_ECAT_DUBIOUS_UNK_DEV	= 7,
	ATA_ECAT_NR			= 8,

	ATA_EH_CMD_DFL_TIMEOUT		=  5000,

	/* always put at least this amount of time between resets */
	ATA_EH_RESET_COOL_DOWN		=  5000,

	/* Waiting in ->prereset can never be reliable.  It's
	 * sometimes nice to wait there but it can't be depended upon;
	 * otherwise, we wouldn't be resetting.  Just give it enough
	 * time for most drives to spin up.
	 */
	ATA_EH_PRERESET_TIMEOUT		= 10000,
	ATA_EH_FASTDRAIN_INTERVAL	=  3000,

	ATA_EH_UA_TRIES			= 5,

	/* probe speed down parameters, see ata_eh_schedule_probe() */
	ATA_EH_PROBE_TRIAL_INTERVAL	= 60000,	/* 1 min */
	ATA_EH_PROBE_TRIALS		= 2,
};

/* The following table determines how we sequence resets.  Each entry
 * represents timeout for that try.  The first try can be soft or
 * hardreset.  All others are hardreset if available.  In most cases
 * the first reset w/ 10sec timeout should succeed.  Following entries
 * are mostly for error handling, hotplug and devices which are
 * exceptionally slow to recover from reset.
 */
static const unsigned long ata_eh_reset_timeouts[] = {
	10000,	/* most drives spin up by 10sec */
	10000,	/* > 99% working drives spin up before 20sec */
	35000,	/* give > 30 secs of idleness for slow-to-recover devices */
	 5000,	/* and sweet one last chance */
	ULONG_MAX, /* > 1 min has elapsed, give up */
};
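
/* Illustrative arithmetic (not from the original source): the four
 * finite entries above sum to 60s of reset timeouts; counting the
 * mandatory ATA_EH_RESET_COOL_DOWN of 5s between attempts, more than a
 * minute of wall-clock time has passed once the ULONG_MAX entry is
 * reached, which is why that entry means "give up".
 */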

static const unsigned long ata_eh_identify_timeouts[] = {
	 5000,	/* covers > 99% of successes and not too boring on failures */
	10000,  /* combined time till here is enough even for media access */
	30000,	/* for exceptionally slow devices */
	ULONG_MAX,
};

static const unsigned long ata_eh_flush_timeouts[] = {
	15000,	/* be generous with flush */
	15000,  /* ditto */
	30000,	/* and even more generous */
	ULONG_MAX,
};

static const unsigned long ata_eh_other_timeouts[] = {
	 5000,	/* same rationale as identify timeout */
	10000,	/* ditto */
	/* but no merciful 30sec for other commands, it just isn't worth it */
	ULONG_MAX,
};

struct ata_eh_cmd_timeout_ent {
	const u8		*commands;
	const unsigned long	*timeouts;
};

/* The following table determines timeouts to use for EH internal
 * commands.  Each table entry is a command class and matches the
 * commands the entry applies to and the timeout table to use.
 *
 * On the retry after a command timed out, the next timeout value from
 * the table is used.  If the table doesn't contain further entries,
 * the last value is used.
 *
 * ehc->cmd_timeout_idx keeps track of which timeout to use per
 * command class, so if SET_FEATURES times out on the first try, the
 * next try will use the second timeout value only for that class.
 */
#define CMDS(cmds...)	(const u8 []){ cmds, 0 }
static const struct ata_eh_cmd_timeout_ent
ata_eh_cmd_timeout_table[ATA_EH_CMD_TIMEOUT_TABLE_SIZE] = {
	{ .commands = CMDS(ATA_CMD_ID_ATA, ATA_CMD_ID_ATAPI),
	  .timeouts = ata_eh_identify_timeouts, },
	{ .commands = CMDS(ATA_CMD_READ_NATIVE_MAX, ATA_CMD_READ_NATIVE_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_MAX, ATA_CMD_SET_MAX_EXT),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_SET_FEATURES),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_INIT_DEV_PARAMS),
	  .timeouts = ata_eh_other_timeouts, },
	{ .commands = CMDS(ATA_CMD_FLUSH, ATA_CMD_FLUSH_EXT),
	  .timeouts = ata_eh_flush_timeouts },
};
#undef CMDS
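
/* Worked example (illustrative, not part of the original source): if
 * IDENTIFY DEVICE times out on its first try, ata_internal_cmd_timed_out()
 * advances cmd_timeout_idx for the identify class, so the retry runs
 * with the 10s entry and a further retry with the 30s entry.  Once the
 * next entry is ULONG_MAX the index stops advancing and the last finite
 * value keeps being used.
 */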

static void __ata_port_freeze(struct ata_port *ap);
#ifdef CONFIG_PM
static void ata_eh_handle_port_suspend(struct ata_port *ap);
static void ata_eh_handle_port_resume(struct ata_port *ap);
#else /* CONFIG_PM */
static void ata_eh_handle_port_suspend(struct ata_port *ap)
{ }

static void ata_eh_handle_port_resume(struct ata_port *ap)
{ }
#endif /* CONFIG_PM */

static void __ata_ehi_pushv_desc(struct ata_eh_info *ehi, const char *fmt,
				 va_list args)
{
	ehi->desc_len += vscnprintf(ehi->desc + ehi->desc_len,
				    ATA_EH_DESC_LEN - ehi->desc_len,
				    fmt, args);
}

/**
 *	__ata_ehi_push_desc - push error description without adding separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_push_desc - push error description with separator
 *	@ehi: target EHI
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to @ehi->desc.
 *	If @ehi->desc is not empty, ", " is added in-between.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_push_desc(struct ata_eh_info *ehi, const char *fmt, ...)
{
	va_list args;

	if (ehi->desc_len)
		__ata_ehi_push_desc(ehi, ", ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(ehi, fmt, args);
	va_end(args);
}

/**
 *	ata_ehi_clear_desc - clean error description
 *	@ehi: target EHI
 *
 *	Clear @ehi->desc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_ehi_clear_desc(struct ata_eh_info *ehi)
{
	ehi->desc[0] = '\0';
	ehi->desc_len = 0;
}

/**
 *	ata_port_desc - append port description
 *	@ap: target ATA port
 *	@fmt: printf format string
 *
 *	Format string according to @fmt and append it to port
 *	description.  If port description is not empty, " " is added
 *	in-between.  This function is to be used while initializing
 *	ata_host.  The description is printed on host registration.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_desc(struct ata_port *ap, const char *fmt, ...)
{
	va_list args;

	WARN_ON(!(ap->pflags & ATA_PFLAG_INITIALIZING));

	if (ap->link.eh_info.desc_len)
		__ata_ehi_push_desc(&ap->link.eh_info, " ");

	va_start(args, fmt);
	__ata_ehi_pushv_desc(&ap->link.eh_info, fmt, args);
	va_end(args);
}

#ifdef CONFIG_PCI

/**
 *	ata_port_pbar_desc - append PCI BAR description
 *	@ap: target ATA port
 *	@bar: target PCI BAR
 *	@offset: offset into PCI BAR
 *	@name: name of the area
 *
 *	If @offset is negative, this function formats a string which
 *	contains the name, address, size and type of the BAR and
 *	appends it to the port description.  If @offset is zero or
 *	positive, only name and offsetted address is appended.
 *
 *	LOCKING:
 *	None.
 */
void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
			const char *name)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);
	char *type = "";
	unsigned long long start, len;

	if (pci_resource_flags(pdev, bar) & IORESOURCE_MEM)
		type = "m";
	else if (pci_resource_flags(pdev, bar) & IORESOURCE_IO)
		type = "i";

	start = (unsigned long long)pci_resource_start(pdev, bar);
	len = (unsigned long long)pci_resource_len(pdev, bar);

	if (offset < 0)
		ata_port_desc(ap, "%s %s%llu@0x%llx", name, type, len, start);
	else
		ata_port_desc(ap, "%s 0x%llx", name,
				start + (unsigned long long)offset);
}

#endif /* CONFIG_PCI */

static int ata_lookup_timeout_table(u8 cmd)
{
	int i;

	for (i = 0; i < ATA_EH_CMD_TIMEOUT_TABLE_SIZE; i++) {
		const u8 *cur;

		for (cur = ata_eh_cmd_timeout_table[i].commands; *cur; cur++)
			if (*cur == cmd)
				return i;
	}

	return -1;
}

/**
 *	ata_internal_cmd_timeout - determine timeout for an internal command
 *	@dev: target device
 *	@cmd: internal command to be issued
 *
 *	Determine timeout for internal command @cmd for @dev.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	Determined timeout.
 */
unsigned long ata_internal_cmd_timeout(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return ATA_EH_CMD_DFL_TIMEOUT;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	return ata_eh_cmd_timeout_table[ent].timeouts[idx];
}

/**
 *	ata_internal_cmd_timed_out - notification for internal command timeout
 *	@dev: target device
 *	@cmd: internal command which timed out
 *
 *	Notify EH that internal command @cmd for @dev timed out.  This
 *	function should be called only for commands whose timeouts are
 *	determined using ata_internal_cmd_timeout().
 *
 *	LOCKING:
 *	EH context.
 */
void ata_internal_cmd_timed_out(struct ata_device *dev, u8 cmd)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int ent = ata_lookup_timeout_table(cmd);
	int idx;

	if (ent < 0)
		return;

	idx = ehc->cmd_timeout_idx[dev->devno][ent];
	if (ata_eh_cmd_timeout_table[ent].timeouts[idx + 1] != ULONG_MAX)
		ehc->cmd_timeout_idx[dev->devno][ent]++;
}

static void ata_ering_record(struct ata_ering *ering, unsigned int eflags,
			     unsigned int err_mask)
{
	struct ata_ering_entry *ent;

	WARN_ON(!err_mask);

	ering->cursor++;
	ering->cursor %= ATA_ERING_SIZE;

	ent = &ering->ring[ering->cursor];
	ent->eflags = eflags;
	ent->err_mask = err_mask;
	ent->timestamp = get_jiffies_64();
}

static struct ata_ering_entry *ata_ering_top(struct ata_ering *ering)
{
	struct ata_ering_entry *ent = &ering->ring[ering->cursor];

	if (ent->err_mask)
		return ent;
	return NULL;
}

static void ata_ering_clear(struct ata_ering *ering)
{
	memset(ering, 0, sizeof(*ering));
}

static int ata_ering_map(struct ata_ering *ering,
			 int (*map_fn)(struct ata_ering_entry *, void *),
			 void *arg)
{
	int idx, rc = 0;
	struct ata_ering_entry *ent;

	idx = ering->cursor;
	do {
		ent = &ering->ring[idx];
		if (!ent->err_mask)
			break;
		rc = map_fn(ent, arg);
		if (rc)
			break;
		idx = (idx - 1 + ATA_ERING_SIZE) % ATA_ERING_SIZE;
	} while (idx != ering->cursor);

	return rc;
}
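
/* Note on traversal order (illustrative, not from the original source):
 * ata_ering_map() visits entries newest-first, starting at ->cursor and
 * stepping backwards modulo ATA_ERING_SIZE, and stops early at the
 * first empty slot (!err_mask) or when map_fn returns non-zero.  This
 * is what lets speed_down_verdict_cb() below abort the walk as soon as
 * it sees an entry older than the window of interest.
 */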

static unsigned int ata_eh_dev_action(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;

	return ehc->i.action | ehc->i.dev_action[dev->devno];
}

static void ata_eh_clear_action(struct ata_link *link, struct ata_device *dev,
				struct ata_eh_info *ehi, unsigned int action)
{
	struct ata_device *tdev;

	if (!dev) {
		ehi->action &= ~action;
		ata_for_each_dev(tdev, link, ALL)
			ehi->dev_action[tdev->devno] &= ~action;
	} else {
		/* doesn't make sense for port-wide EH actions */
		WARN_ON(!(action & ATA_EH_PERDEV_MASK));

		/* break ehi->action into ehi->dev_action */
		if (ehi->action & action) {
			ata_for_each_dev(tdev, link, ALL)
				ehi->dev_action[tdev->devno] |=
					ehi->action & action;
			ehi->action &= ~action;
		}

		/* turn off the specified per-dev action */
		ehi->dev_action[dev->devno] &= ~action;
	}
}

/**
 *	ata_scsi_timed_out - SCSI layer time out callback
 *	@cmd: timed out SCSI command
 *
 *	Handles SCSI layer timeout.  We race with normal completion of
 *	the qc for @cmd.  If the qc is already gone, we lose and let
 *	the scsi command finish (EH_HANDLED).  Otherwise, the qc has
 *	timed out and EH should be invoked.  Prevent ata_qc_complete()
 *	from finishing it by setting EH_SCHEDULED and return
 *	EH_NOT_HANDLED.
 *
 *	TODO: kill this function once old EH is gone.
 *
 *	LOCKING:
 *	Called from timer context
 *
 *	RETURNS:
 *	EH_HANDLED or EH_NOT_HANDLED
 */
enum blk_eh_timer_return ata_scsi_timed_out(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct ata_port *ap = ata_shost_to_port(host);
	unsigned long flags;
	struct ata_queued_cmd *qc;
	enum blk_eh_timer_return ret;

	DPRINTK("ENTER\n");

	if (ap->ops->error_handler) {
		ret = BLK_EH_NOT_HANDLED;
		goto out;
	}

	ret = BLK_EH_HANDLED;
	spin_lock_irqsave(ap->lock, flags);
	qc = ata_qc_from_tag(ap, ap->link.active_tag);
	if (qc) {
		WARN_ON(qc->scsicmd != cmd);
		qc->flags |= ATA_QCFLAG_EH_SCHEDULED;
		qc->err_mask |= AC_ERR_TIMEOUT;
		ret = BLK_EH_NOT_HANDLED;
	}
	spin_unlock_irqrestore(ap->lock, flags);

 out:
	DPRINTK("EXIT, ret=%d\n", ret);
	return ret;
}
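
/* Summary of the race outcomes above (illustrative, not from the
 * original source):
 *
 *   qc already gone (completion won)  -> BLK_EH_HANDLED, scmd finishes
 *   qc still active (timeout won)     -> EH_SCHEDULED set on the qc,
 *                                        BLK_EH_NOT_HANDLED, EH runs
 *
 * With new-style EH (->error_handler set) the function always returns
 * BLK_EH_NOT_HANDLED and sorting out the race is left to
 * ata_scsi_error() on EH entry.
 */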

static void ata_eh_unload(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;
	unsigned long flags;

	/* Restore SControl IPM and SPD for the next driver and
	 * disable attached devices.
	 */
	ata_for_each_link(link, ap, PMP_FIRST) {
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol & 0xff0);
		ata_for_each_dev(dev, link, ALL)
			ata_dev_disable(dev);
	}

	/* freeze and set UNLOADED */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_freeze(ap);			/* won't be thawed */
	ap->pflags &= ~ATA_PFLAG_EH_PENDING;	/* clear pending from freeze */
	ap->pflags |= ATA_PFLAG_UNLOADED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_scsi_error - SCSI layer error handler callback
 *	@host: SCSI host on which error occurred
 *
 *	Handles SCSI-layer-thrown error events.
 *
 *	LOCKING:
 *	Inherited from SCSI layer (none, can sleep)
 *
 *	RETURNS:
 *	Zero.
 */
void ata_scsi_error(struct Scsi_Host *host)
{
	struct ata_port *ap = ata_shost_to_port(host);
	int i;
	unsigned long flags;

	DPRINTK("ENTER\n");

	/* synchronize with port task */
	ata_port_flush_task(ap);

	/* synchronize with host lock and sort out timeouts */

	/* For new EH, all qcs are finished in one of three ways -
	 * normal completion, error completion, and SCSI timeout.
	 * Both completions can race against SCSI timeout.  When normal
	 * completion wins, the qc never reaches EH.  When error
	 * completion wins, the qc has ATA_QCFLAG_FAILED set.
	 *
	 * When SCSI timeout wins, things are a bit more complex.
	 * Normal or error completion can occur after the timeout but
	 * before this point.  In such cases, both types of
	 * completions are honored.  A scmd is determined to have
	 * timed out iff its associated qc is active and not failed.
	 */
	if (ap->ops->error_handler) {
		struct scsi_cmnd *scmd, *tmp;
		int nr_timedout = 0;

		spin_lock_irqsave(ap->lock, flags);

		/* This must occur under the ap->lock as we don't want
		   a polled recovery to race the real interrupt handler

		   The lost_interrupt handler checks for any completed but
		   non-notified command and completes much like an IRQ handler.

		   We then fall into the error recovery code which will treat
		   this as if normal completion won the race */

		if (ap->ops->lost_interrupt)
			ap->ops->lost_interrupt(ap);

		list_for_each_entry_safe(scmd, tmp, &host->eh_cmd_q, eh_entry) {
			struct ata_queued_cmd *qc;

			for (i = 0; i < ATA_MAX_QUEUE; i++) {
				qc = __ata_qc_from_tag(ap, i);
				if (qc->flags & ATA_QCFLAG_ACTIVE &&
				    qc->scsicmd == scmd)
					break;
			}

			if (i < ATA_MAX_QUEUE) {
				/* the scmd has an associated qc */
				if (!(qc->flags & ATA_QCFLAG_FAILED)) {
					/* which hasn't failed yet, timeout */
					qc->err_mask |= AC_ERR_TIMEOUT;
					qc->flags |= ATA_QCFLAG_FAILED;
					nr_timedout++;
				}
			} else {
				/* Normal completion occurred after
				 * SCSI timeout but before this point.
				 * Successfully complete it.
				 */
				scmd->retries = scmd->allowed;
				scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
			}
		}

		/* If we have timed out qcs, they belong to EH from
		 * this point but the state of the controller is
		 * unknown.  Freeze the port to make sure the IRQ
		 * handler doesn't diddle with those qcs.  This must
		 * be done atomically w.r.t. setting QCFLAG_FAILED.
		 */
		if (nr_timedout)
			__ata_port_freeze(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* initialize eh_tries */
		ap->eh_tries = ATA_EH_MAX_TRIES;
	} else
		spin_unlock_wait(ap->lock);

	/* If we raced normal completion and there is nothing to recover
	 * (nr_timedout == 0), why exactly are we doing error recovery?
	 */

 repeat:
	/* invoke error handler */
	if (ap->ops->error_handler) {
		struct ata_link *link;

		/* kill fast drain timer */
		del_timer_sync(&ap->fastdrain_timer);

		/* process port resume request */
		ata_eh_handle_port_resume(ap);

		/* fetch & clear EH info */
		spin_lock_irqsave(ap->lock, flags);

		ata_for_each_link(link, ap, HOST_FIRST) {
			struct ata_eh_context *ehc = &link->eh_context;
			struct ata_device *dev;

			memset(&link->eh_context, 0, sizeof(link->eh_context));
			link->eh_context.i = link->eh_info;
			memset(&link->eh_info, 0, sizeof(link->eh_info));

			ata_for_each_dev(dev, link, ENABLED) {
				int devno = dev->devno;

				ehc->saved_xfer_mode[devno] = dev->xfer_mode;
				if (ata_ncq_enabled(dev))
					ehc->saved_ncq_enabled |= 1 << devno;
			}
		}

		ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
		ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		ap->excl_link = NULL;	/* don't maintain exclusion over EH */

		spin_unlock_irqrestore(ap->lock, flags);

		/* invoke EH, skip if unloading or suspended */
		if (!(ap->pflags & (ATA_PFLAG_UNLOADING | ATA_PFLAG_SUSPENDED)))
			ap->ops->error_handler(ap);
		else {
			/* if unloading, commence suicide */
			if ((ap->pflags & ATA_PFLAG_UNLOADING) &&
			    !(ap->pflags & ATA_PFLAG_UNLOADED))
				ata_eh_unload(ap);
			ata_eh_finish(ap);
		}

		/* process port suspend request */
		ata_eh_handle_port_suspend(ap);

		/* Exception might have happened after ->error_handler
		 * recovered the port but before this point.  Repeat
		 * EH in such case.
		 */
		spin_lock_irqsave(ap->lock, flags);

		if (ap->pflags & ATA_PFLAG_EH_PENDING) {
			if (--ap->eh_tries) {
				spin_unlock_irqrestore(ap->lock, flags);
				goto repeat;
			}
			ata_port_printk(ap, KERN_ERR, "EH pending after %d "
					"tries, giving up\n", ATA_EH_MAX_TRIES);
			ap->pflags &= ~ATA_PFLAG_EH_PENDING;
		}

		/* this run is complete, make sure EH info is clear */
		ata_for_each_link(link, ap, HOST_FIRST)
			memset(&link->eh_info, 0, sizeof(link->eh_info));

		/* Clear host_eh_scheduled while holding ap->lock such
		 * that if exception occurs after this point but
		 * before EH completion, SCSI midlayer will
		 * re-initiate EH.
		 */
		host->host_eh_scheduled = 0;

		spin_unlock_irqrestore(ap->lock, flags);
	} else {
		WARN_ON(ata_qc_from_tag(ap, ap->link.active_tag) == NULL);
		ap->ops->eng_timeout(ap);
	}

	/* finish or retry handled scmd's and clean up */
	WARN_ON(host->host_failed || !list_empty(&host->eh_cmd_q));

	scsi_eh_flush_done_q(&ap->eh_done_q);

	/* clean up */
	spin_lock_irqsave(ap->lock, flags);

	if (ap->pflags & ATA_PFLAG_LOADING)
		ap->pflags &= ~ATA_PFLAG_LOADING;
	else if (ap->pflags & ATA_PFLAG_SCSI_HOTPLUG)
		queue_delayed_work(ata_aux_wq, &ap->hotplug_task, 0);

	if (ap->pflags & ATA_PFLAG_RECOVERED)
		ata_port_printk(ap, KERN_INFO, "EH complete\n");

	ap->pflags &= ~(ATA_PFLAG_SCSI_HOTPLUG | ATA_PFLAG_RECOVERED);

	/* tell wait_eh that we're done */
	ap->pflags &= ~ATA_PFLAG_EH_IN_PROGRESS;
	wake_up_all(&ap->eh_wait_q);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("EXIT\n");
}

/**
 *	ata_port_wait_eh - Wait for the currently pending EH to complete
 *	@ap: Port to wait EH for
 *
 *	Wait until the currently pending EH is complete.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_wait_eh(struct ata_port *ap)
{
	unsigned long flags;
	DEFINE_WAIT(wait);

 retry:
	spin_lock_irqsave(ap->lock, flags);

	while (ap->pflags & (ATA_PFLAG_EH_PENDING | ATA_PFLAG_EH_IN_PROGRESS)) {
		prepare_to_wait(&ap->eh_wait_q, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_irqrestore(ap->lock, flags);
		schedule();
		spin_lock_irqsave(ap->lock, flags);
	}
	finish_wait(&ap->eh_wait_q, &wait);

	spin_unlock_irqrestore(ap->lock, flags);

	/* make sure SCSI EH is complete */
	if (scsi_host_in_recovery(ap->scsi_host)) {
		msleep(10);
		goto retry;
	}
}

static int ata_eh_nr_in_flight(struct ata_port *ap)
{
	unsigned int tag;
	int nr = 0;

	/* count only non-internal commands */
	for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++)
		if (ata_qc_from_tag(ap, tag))
			nr++;

	return nr;
}

void ata_eh_fastdrain_timerfn(unsigned long arg)
{
	struct ata_port *ap = (void *)arg;
	unsigned long flags;
	int cnt;

	spin_lock_irqsave(ap->lock, flags);

	cnt = ata_eh_nr_in_flight(ap);

	/* are we done? */
	if (!cnt)
		goto out_unlock;

	if (cnt == ap->fastdrain_cnt) {
		unsigned int tag;

		/* No progress during the last interval, tag all
		 * in-flight qcs as timed out and freeze the port.
		 */
		for (tag = 0; tag < ATA_MAX_QUEUE - 1; tag++) {
			struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);
			if (qc)
				qc->err_mask |= AC_ERR_TIMEOUT;
		}

		ata_port_freeze(ap);
	} else {
		/* some qcs have finished, give it another chance */
		ap->fastdrain_cnt = cnt;
		ap->fastdrain_timer.expires =
			ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
		add_timer(&ap->fastdrain_timer);
	}

 out_unlock:
	spin_unlock_irqrestore(ap->lock, flags);
}
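
/* Example timeline (illustrative, not from the original source): EH
 * becomes pending with 4 qcs in flight and fast drain armed; the timer
 * fires ATA_EH_FASTDRAIN_INTERVAL (3s) later.  If all 4 are still in
 * flight, every qc is marked AC_ERR_TIMEOUT and the port is frozen; if,
 * say, 2 have completed, the timer is re-armed for another 3s with
 * fastdrain_cnt = 2.
 */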

/**
 *	ata_eh_set_pending - set ATA_PFLAG_EH_PENDING and activate fast drain
 *	@ap: target ATA port
 *	@fastdrain: activate fast drain
 *
 *	Set ATA_PFLAG_EH_PENDING and activate fast drain if @fastdrain
 *	is non-zero and EH wasn't pending before.  Fast drain ensures
 *	that EH kicks in in a timely manner.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_eh_set_pending(struct ata_port *ap, int fastdrain)
{
	int cnt;

	/* already scheduled? */
	if (ap->pflags & ATA_PFLAG_EH_PENDING)
		return;

	ap->pflags |= ATA_PFLAG_EH_PENDING;

	if (!fastdrain)
		return;

	/* do we have in-flight qcs? */
	cnt = ata_eh_nr_in_flight(ap);
	if (!cnt)
		return;

	/* activate fast drain */
	ap->fastdrain_cnt = cnt;
	ap->fastdrain_timer.expires =
		ata_deadline(jiffies, ATA_EH_FASTDRAIN_INTERVAL);
	add_timer(&ap->fastdrain_timer);
}

/**
 *	ata_qc_schedule_eh - schedule qc for error handling
 *	@qc: command to schedule error handling for
 *
 *	Schedule error handling for @qc.  EH will kick in as soon as
 *	other commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_schedule_eh(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct request_queue *q = qc->scsicmd->device->request_queue;
	unsigned long flags;

	WARN_ON(!ap->ops->error_handler);

	qc->flags |= ATA_QCFLAG_FAILED;
	ata_eh_set_pending(ap, 1);

	/* The following will fail if timeout has already expired.
	 * ata_scsi_error() takes care of such scmds on EH entry.
	 * Note that ATA_QCFLAG_FAILED is unconditionally set after
	 * this function completes.
	 */
	spin_lock_irqsave(q->queue_lock, flags);
	blk_abort_request(qc->scsicmd->request);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

/**
 *	ata_port_schedule_eh - schedule error handling without a qc
 *	@ap: ATA port to schedule EH for
 *
 *	Schedule error handling for @ap.  EH will kick in as soon as
 *	all commands are drained.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_port_schedule_eh(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->pflags & ATA_PFLAG_INITIALIZING)
		return;

	ata_eh_set_pending(ap, 1);
	scsi_schedule_eh(ap->scsi_host);

	DPRINTK("port EH scheduled\n");
}

static int ata_do_link_abort(struct ata_port *ap, struct ata_link *link)
{
	int tag, nr_aborted = 0;

	WARN_ON(!ap->ops->error_handler);

	/* we're gonna abort all commands, no need for fast drain */
	ata_eh_set_pending(ap, 0);

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = ata_qc_from_tag(ap, tag);

		if (qc && (!link || qc->dev->link == link)) {
			qc->flags |= ATA_QCFLAG_FAILED;
			ata_qc_complete(qc);
			nr_aborted++;
		}
	}

	if (!nr_aborted)
		ata_port_schedule_eh(ap);

	return nr_aborted;
}

/**
 *	ata_link_abort - abort all qc's on the link
 *	@link: ATA link to abort qc's for
 *
 *	Abort all active qc's on @link and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_link_abort(struct ata_link *link)
{
	return ata_do_link_abort(link->ap, link);
}

/**
 *	ata_port_abort - abort all qc's on the port
 *	@ap: ATA port to abort qc's for
 *
 *	Abort all active qc's of @ap and schedule EH.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted qc's.
 */
int ata_port_abort(struct ata_port *ap)
{
	return ata_do_link_abort(ap, NULL);
}

/**
 *	__ata_port_freeze - freeze port
 *	@ap: ATA port to freeze
 *
 *	This function is called when HSM violation or some other
 *	condition disrupts normal operation of the port.  Frozen port
 *	is not allowed to perform any operation until the port is
 *	thawed, which usually follows a successful reset.
 *
 *	ap->ops->freeze() callback can be used for freezing the port
 *	hardware-wise (e.g. mask interrupt and stop DMA engine).  If a
 *	port cannot be frozen hardware-wise, the interrupt handler
 *	must ack and clear interrupts unconditionally while the port
 *	is frozen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void __ata_port_freeze(struct ata_port *ap)
{
	WARN_ON(!ap->ops->error_handler);

	if (ap->ops->freeze)
		ap->ops->freeze(ap);

	ap->pflags |= ATA_PFLAG_FROZEN;

	DPRINTK("ata%u port frozen\n", ap->print_id);
}

/**
 *	ata_port_freeze - abort & freeze port
 *	@ap: ATA port to freeze
 *
 *	Abort and freeze @ap.  The freeze operation must be called
 *	first, because some hardware requires special operations
 *	before the taskfile registers are accessible.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of aborted commands.
 */
int ata_port_freeze(struct ata_port *ap)
{
	int nr_aborted;

	WARN_ON(!ap->ops->error_handler);

	__ata_port_freeze(ap);
	nr_aborted = ata_port_abort(ap);

	return nr_aborted;
}

/**
 *	sata_async_notification - SATA async notification handler
 *	@ap: ATA port where async notification is received
 *
 *	Handler to be called when async notification via SDB FIS is
 *	received.  This function schedules EH if necessary.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	1 if EH is scheduled, 0 otherwise.
 */
int sata_async_notification(struct ata_port *ap)
{
	u32 sntf;
	int rc;

	if (!(ap->flags & ATA_FLAG_AN))
		return 0;

	rc = sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf);
	if (rc == 0)
		sata_scr_write(&ap->link, SCR_NOTIFICATION, sntf);

	if (!sata_pmp_attached(ap) || rc) {
		/* PMP is not attached or SNTF is not available */
		if (!sata_pmp_attached(ap)) {
			/* PMP is not attached.  Check whether ATAPI
			 * AN is configured.  If so, notify media
			 * change.
			 */
			struct ata_device *dev = ap->link.device;

			if ((dev->class == ATA_DEV_ATAPI) &&
			    (dev->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(dev);
			return 0;
		} else {
			/* PMP is attached but SNTF is not available.
			 * ATAPI async media change notification is
			 * not used.  The PMP must be reporting PHY
			 * status change, schedule EH.
			 */
			ata_port_schedule_eh(ap);
			return 1;
		}
	} else {
		/* PMP is attached and SNTF is available */
		struct ata_link *link;

		/* check and notify ATAPI AN */
		ata_for_each_link(link, ap, EDGE) {
			if (!(sntf & (1 << link->pmp)))
				continue;

			if ((link->device->class == ATA_DEV_ATAPI) &&
			    (link->device->flags & ATA_DFLAG_AN))
				ata_scsi_media_change_notify(link->device);
		}

		/* If PMP is reporting that PHY status of some
		 * downstream ports has changed, schedule EH.
		 */
		if (sntf & (1 << SATA_PMP_CTRL_PORT)) {
			ata_port_schedule_eh(ap);
			return 1;
		}

		return 0;
	}
}

/**
 *	ata_eh_freeze_port - EH helper to freeze port
 *	@ap: ATA port to freeze
 *
 *	Freeze @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_freeze_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);
	__ata_port_freeze(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_thaw_port - EH helper to thaw port
 *	@ap: ATA port to thaw
 *
 *	Thaw frozen port @ap.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_thaw_port(struct ata_port *ap)
{
	unsigned long flags;

	if (!ap->ops->error_handler)
		return;

	spin_lock_irqsave(ap->lock, flags);

	ap->pflags &= ~ATA_PFLAG_FROZEN;

	if (ap->ops->thaw)
		ap->ops->thaw(ap);

	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("ata%u port thawed\n", ap->print_id);
}

static void ata_eh_scsidone(struct scsi_cmnd *scmd)
{
	/* nada */
}

static void __ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scsi_cmnd *scmd = qc->scsicmd;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	qc->scsidone = ata_eh_scsidone;
	__ata_qc_complete(qc);
	WARN_ON(ata_tag_valid(qc->tag));
	spin_unlock_irqrestore(ap->lock, flags);

	scsi_eh_finish_cmd(scmd, &ap->eh_done_q);
}

/**
 * ata_eh_qc_complete - Complete an active ATA command from EH
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA command has
 * completed.  To be used from EH.
 */
void ata_eh_qc_complete(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	scmd->retries = scmd->allowed;
	__ata_eh_qc_complete(qc);
}

/**
 * ata_eh_qc_retry - Tell midlayer to retry an ATA command after EH
 * @qc: Command to retry
 *
 * Indicate to the mid and upper layers that an ATA command
 * should be retried.  To be used from EH.
 *
 * SCSI midlayer limits the number of retries to scmd->allowed.
 * scmd->retries is decremented for commands which get retried
 * due to unrelated failures (qc->err_mask is zero).
 */
void ata_eh_qc_retry(struct ata_queued_cmd *qc)
{
	struct scsi_cmnd *scmd = qc->scsicmd;
	if (!qc->err_mask && scmd->retries)
		scmd->retries--;
	__ata_eh_qc_complete(qc);
}
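
/* Note (illustrative, not from the original source): the SCSI midlayer
 * increments scmd->retries when it re-queues a command from the done
 * queue, so the decrement above makes retries caused by unrelated
 * failures (qc->err_mask == 0) effectively free of charge against
 * scmd->allowed.
 */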

/**
 *	ata_dev_disable - disable ATA device
 *	@dev: ATA device to disable
 *
 *	Disable @dev.
 *
 *	LOCKING:
 *	EH context.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (!ata_dev_enabled(dev))
		return;

	if (ata_msg_drv(dev->link->ap))
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
	ata_acpi_on_disable(dev);
	ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 | ATA_DNXFER_QUIET);
	dev->class++;

	/* From now till the next successful probe, ering is used to
	 * track probe failures.  Clear accumulated device error info.
	 */
	ata_ering_clear(&dev->ering);
}

/**
 *	ata_eh_detach_dev - detach ATA device
 *	@dev: ATA device to detach
 *
 *	Detach @dev.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_detach_dev(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	ata_dev_disable(dev);

	spin_lock_irqsave(ap->lock, flags);

	dev->flags &= ~ATA_DFLAG_DETACH;

	if (ata_scsi_offline_dev(dev)) {
		dev->flags |= ATA_DFLAG_DETACHED;
		ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
	}

	/* clear per-dev EH info */
	ata_eh_clear_action(link, dev, &link->eh_info, ATA_EH_PERDEV_MASK);
	ata_eh_clear_action(link, dev, &link->eh_context.i, ATA_EH_PERDEV_MASK);
	ehc->saved_xfer_mode[dev->devno] = 0;
	ehc->saved_ncq_enabled &= ~(1 << dev->devno);

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_about_to_do - about to perform eh_action
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action about to be performed
 *
 *	Called just before performing EH actions to clear related bits
 *	in @link->eh_info such that eh actions are not unnecessarily
 *	repeated.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_about_to_do(struct ata_link *link, struct ata_device *dev,
			unsigned int action)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_info *ehi = &link->eh_info;
	struct ata_eh_context *ehc = &link->eh_context;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);

	ata_eh_clear_action(link, dev, ehi, action);

	/* About to take EH action, set RECOVERED.  Ignore actions on
	 * slave links as master will do them again.
	 */
	if (!(ehc->i.flags & ATA_EHI_QUIET) && link != ap->slave_link)
		ap->pflags |= ATA_PFLAG_RECOVERED;

	spin_unlock_irqrestore(ap->lock, flags);
}

/**
 *	ata_eh_done - EH action complete
 *	@link: target ATA link
 *	@dev: target ATA dev for per-dev action (can be NULL)
 *	@action: action just completed
 *
 *	Called right after performing EH actions to clear related bits
 *	in @link->eh_context.
 *
 *	LOCKING:
 *	None.
 */
void ata_eh_done(struct ata_link *link, struct ata_device *dev,
		 unsigned int action)
{
	struct ata_eh_context *ehc = &link->eh_context;

	ata_eh_clear_action(link, dev, &ehc->i, action);
}

/**
 *	ata_err_string - convert err_mask to descriptive string
 *	@err_mask: error mask to convert to string
 *
 *	Convert @err_mask to descriptive string.  Errors are
 *	prioritized according to severity and only the most severe
 *	error is reported.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Descriptive string for @err_mask
 */
static const char *ata_err_string(unsigned int err_mask)
{
	if (err_mask & AC_ERR_HOST_BUS)
		return "host bus error";
	if (err_mask & AC_ERR_ATA_BUS)
		return "ATA bus error";
	if (err_mask & AC_ERR_TIMEOUT)
		return "timeout";
	if (err_mask & AC_ERR_HSM)
		return "HSM violation";
	if (err_mask & AC_ERR_SYSTEM)
		return "internal error";
	if (err_mask & AC_ERR_MEDIA)
		return "media error";
	if (err_mask & AC_ERR_INVALID)
		return "invalid argument";
	if (err_mask & AC_ERR_DEV)
		return "device error";
	return "unknown error";
}

/**
 *	ata_read_log_page - read a specific log page
 *	@dev: target device
 *	@page: page to read
 *	@buf: buffer to store read page
 *	@sectors: number of sectors to read
 *
 *	Read log page using READ_LOG_EXT command.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_read_log_page(struct ata_device *dev,
				      u8 page, void *buf, unsigned int sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	DPRINTK("read log page - page %d\n", page);

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_READ_LOG_EXT;
	tf.lbal = page;
	tf.nsect = sectors;
	tf.hob_nsect = sectors >> 8;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_LBA48 | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_PIO;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     buf, sectors * ATA_SECT_SIZE, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_eh_read_log_10h - Read log page 10h for NCQ error details
 *	@dev: Device to read log page 10h from
 *	@tag: Resulting tag of the failed command
 *	@tf: Resulting taskfile registers of the failed command
 *
 *	Read log page 10h to obtain NCQ error details and clear error
 *	condition.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
static int ata_eh_read_log_10h(struct ata_device *dev,
			       int *tag, struct ata_taskfile *tf)
{
	u8 *buf = dev->link->ap->sector_buf;
	unsigned int err_mask;
	u8 csum;
	int i;

	err_mask = ata_read_log_page(dev, ATA_LOG_SATA_NCQ, buf, 1);
	if (err_mask)
		return -EIO;

	csum = 0;
	for (i = 0; i < ATA_SECT_SIZE; i++)
		csum += buf[i];
	if (csum)
		ata_dev_printk(dev, KERN_WARNING,
			       "invalid checksum 0x%x on log page 10h\n", csum);

	if (buf[0] & 0x80)
		return -ENOENT;

	*tag = buf[0] & 0x1f;

	tf->command = buf[2];
	tf->feature = buf[3];
	tf->lbal = buf[4];
	tf->lbam = buf[5];
	tf->lbah = buf[6];
	tf->device = buf[7];
	tf->hob_lbal = buf[8];
	tf->hob_lbam = buf[9];
	tf->hob_lbah = buf[10];
	tf->nsect = buf[12];
	tf->hob_nsect = buf[13];

	return 0;
}
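
/* The byte offsets above follow the NCQ Command Error log (10h) layout
 * from the SATA specification (stated here as background, not from the
 * original source): byte 0 carries the NQ bit (0x80) plus the tag of
 * the failed command, and bytes 2-13 mirror the shadow taskfile
 * registers of that command.
 */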

/**
 *	atapi_eh_tur - perform ATAPI TEST_UNIT_READY
 *	@dev: target ATAPI device
 *	@r_sense_key: out parameter for sense_key
 *
 *	Perform ATAPI TEST_UNIT_READY.
 *
 *	LOCKING:
 *	EH context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure.
 */
static unsigned int atapi_eh_tur(struct ata_device *dev, u8 *r_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] = { TEST_UNIT_READY, 0, 0, 0, 0, 0 };
	struct ata_taskfile tf;
	unsigned int err_mask;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;
	tf.protocol = ATAPI_PROT_NODATA;

	err_mask = ata_exec_internal(dev, &tf, cdb, DMA_NONE, NULL, 0, 0);
	if (err_mask == AC_ERR_DEV)
		*r_sense_key = tf.feature >> 4;
	return err_mask;
}

/**
 *	atapi_eh_request_sense - perform ATAPI REQUEST_SENSE
 *	@dev: device to perform REQUEST_SENSE to
 *	@sense_buf: result sense data buffer (SCSI_SENSE_BUFFERSIZE bytes long)
 *	@dfl_sense_key: default sense key to use
 *
 *	Perform ATAPI REQUEST_SENSE after the device reported CHECK
 *	SENSE.  This function is an EH helper.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask on failure
 */
static unsigned int atapi_eh_request_sense(struct ata_device *dev,
					   u8 *sense_buf, u8 dfl_sense_key)
{
	u8 cdb[ATAPI_CDB_LEN] =
		{ REQUEST_SENSE, 0, 0, 0, SCSI_SENSE_BUFFERSIZE, 0 };
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;

	DPRINTK("ATAPI request sense\n");

	/* FIXME: is this needed? */
	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);

	/* initialize sense_buf with the error register,
	 * for the case where they are -not- overwritten
	 */
	sense_buf[0] = 0x70;
	sense_buf[2] = dfl_sense_key;

	/* some devices time out if garbage left in tf */
	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.command = ATA_CMD_PACKET;

	/* is it pointless to prefer PIO for "safety reasons"? */
	if (ap->flags & ATA_FLAG_PIO_DMA) {
		tf.protocol = ATAPI_PROT_DMA;
		tf.feature |= ATAPI_PKT_DMA;
	} else {
		tf.protocol = ATAPI_PROT_PIO;
		tf.lbam = SCSI_SENSE_BUFFERSIZE;
		tf.lbah = 0;
	}

	return ata_exec_internal(dev, &tf, cdb, DMA_FROM_DEVICE,
				 sense_buf, SCSI_SENSE_BUFFERSIZE, 0);
}

/**
 *	ata_eh_analyze_serror - analyze SError for a failed port
 *	@link: ATA link to analyze SError for
 *
 *	Analyze SError if available and further determine cause of
 *	failure.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_analyze_serror(struct ata_link *link)
{
	struct ata_eh_context *ehc = &link->eh_context;
	u32 serror = ehc->i.serror;
	unsigned int err_mask = 0, action = 0;
	u32 hotplug_mask;

	if (serror & (SERR_PERSISTENT | SERR_DATA)) {
		err_mask |= AC_ERR_ATA_BUS;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_PROTOCOL) {
		err_mask |= AC_ERR_HSM;
		action |= ATA_EH_RESET;
	}
	if (serror & SERR_INTERNAL) {
		err_mask |= AC_ERR_SYSTEM;
		action |= ATA_EH_RESET;
	}

	/* Determine whether a hotplug event has occurred.  Both
	 * SError.N/X are considered hotplug events for enabled or
	 * host links.  For disabled PMP links, only N bit is
	 * considered as X bit is left at 1 for link plugging.
	 */
	if (!(link->flags & ATA_LFLAG_DISABLED) || ata_is_host_link(link))
		hotplug_mask = SERR_PHYRDY_CHG | SERR_DEV_XCHG;
	else
		hotplug_mask = SERR_PHYRDY_CHG;

	if (serror & hotplug_mask)
		ata_ehi_hotplugged(&ehc->i);

	ehc->i.err_mask |= err_mask;
	ehc->i.action |= action;
}

/**
 *	ata_eh_analyze_ncq_error - analyze NCQ error
 *	@link: ATA link to analyze NCQ error for
 *
 *	Read log page 10h, determine the offending qc and acquire
 *	error status TF.  For NCQ device errors, all an LLDD has to do
 *	is set AC_ERR_DEV in ehi->err_mask.  This function takes
 *	care of the rest.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_analyze_ncq_error(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev = link->device;
	struct ata_queued_cmd *qc;
	struct ata_taskfile tf;
	int tag, rc;

	/* if frozen, we can't do much */
	if (ap->pflags & ATA_PFLAG_FROZEN)
		return;

	/* is it NCQ device error? */
	if (!link->sactive || !(ehc->i.err_mask & AC_ERR_DEV))
		return;

	/* has LLDD analyzed already? */
	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED))
			continue;

		if (qc->err_mask)
			return;
	}

	/* okay, this error is ours */
	rc = ata_eh_read_log_10h(dev, &tag, &tf);
	if (rc) {
		ata_link_printk(link, KERN_ERR, "failed to read log page 10h "
				"(errno=%d)\n", rc);
		return;
	}

	if (!(link->sactive & (1 << tag))) {
		ata_link_printk(link, KERN_ERR, "log page 10h reported "
				"inactive tag %d\n", tag);
		return;
	}

	/* we've got the perpetrator, condemn it */
	qc = __ata_qc_from_tag(ap, tag);
	memcpy(&qc->result_tf, &tf, sizeof(tf));
	qc->result_tf.flags = ATA_TFLAG_ISADDR | ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
	qc->err_mask |= AC_ERR_DEV | AC_ERR_NCQ;
	ehc->i.err_mask &= ~AC_ERR_DEV;
}

/**
 *	ata_eh_analyze_tf - analyze taskfile of a failed qc
 *	@qc: qc to analyze
 *	@tf: Taskfile registers to analyze
 *
 *	Analyze taskfile of @qc and further determine cause of
 *	failure.  This function also requests ATAPI sense data if
 *	available.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action
 */
static unsigned int ata_eh_analyze_tf(struct ata_queued_cmd *qc,
				      const struct ata_taskfile *tf)
{
	unsigned int tmp, action = 0;
	u8 stat = tf->command, err = tf->feature;

	if ((stat & (ATA_BUSY | ATA_DRQ | ATA_DRDY)) != ATA_DRDY) {
		qc->err_mask |= AC_ERR_HSM;
		return ATA_EH_RESET;
	}

	if (stat & (ATA_ERR | ATA_DF))
		qc->err_mask |= AC_ERR_DEV;
	else
		return 0;

	switch (qc->dev->class) {
	case ATA_DEV_ATA:
		if (err & ATA_ICRC)
			qc->err_mask |= AC_ERR_ATA_BUS;
		if (err & ATA_UNC)
			qc->err_mask |= AC_ERR_MEDIA;
		if (err & ATA_IDNF)
			qc->err_mask |= AC_ERR_INVALID;
		break;

	case ATA_DEV_ATAPI:
		if (!(qc->ap->pflags & ATA_PFLAG_FROZEN)) {
			tmp = atapi_eh_request_sense(qc->dev,
						qc->scsicmd->sense_buffer,
						qc->result_tf.feature >> 4);
			if (!tmp) {
				/* ATA_QCFLAG_SENSE_VALID is used to
				 * tell atapi_qc_complete() that sense
				 * data is already valid.
				 *
				 * TODO: interpret sense data and set
				 * appropriate err_mask.
				 */
				qc->flags |= ATA_QCFLAG_SENSE_VALID;
			} else
				qc->err_mask |= tmp;
		}
	}

	if (qc->err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT | AC_ERR_ATA_BUS))
		action |= ATA_EH_RESET;

	return action;
}

static int ata_eh_categorize_error(unsigned int eflags, unsigned int err_mask,
				   int *xfer_ok)
{
	int base = 0;

	if (!(eflags & ATA_EFLAG_DUBIOUS_XFER))
		*xfer_ok = 1;

	if (!*xfer_ok)
		base = ATA_ECAT_DUBIOUS_NONE;

	if (err_mask & AC_ERR_ATA_BUS)
		return base + ATA_ECAT_ATA_BUS;

	if (err_mask & AC_ERR_TIMEOUT)
		return base + ATA_ECAT_TOUT_HSM;

	if (eflags & ATA_EFLAG_IS_IO) {
		if (err_mask & AC_ERR_HSM)
			return base + ATA_ECAT_TOUT_HSM;
		if ((err_mask &
		     (AC_ERR_DEV|AC_ERR_MEDIA|AC_ERR_INVALID)) == AC_ERR_DEV)
			return base + ATA_ECAT_UNK_DEV;
	}

	return base;
}
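
/* Worked example (illustrative, not from the original source): an
 * AC_ERR_TIMEOUT on a command whose preceding transfer hasn't been
 * verified (ATA_EFLAG_DUBIOUS_XFER) yields base = ATA_ECAT_DUBIOUS_NONE
 * (4) plus ATA_ECAT_TOUT_HSM (2), i.e. ATA_ECAT_DUBIOUS_TOUT_HSM (6).
 * The DUBIOUS_* categories are laid out exactly four slots after their
 * verified counterparts, which is what makes the base offset work.
 */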

struct speed_down_verdict_arg {
	u64 since;
	int xfer_ok;
	int nr_errors[ATA_ECAT_NR];
};

static int speed_down_verdict_cb(struct ata_ering_entry *ent, void *void_arg)
{
	struct speed_down_verdict_arg *arg = void_arg;
	int cat;

	if (ent->timestamp < arg->since)
		return -1;

	cat = ata_eh_categorize_error(ent->eflags, ent->err_mask,
				      &arg->xfer_ok);
	arg->nr_errors[cat]++;

	return 0;
}

/**
 *	ata_eh_speed_down_verdict - Determine speed down verdict
 *	@dev: Device of interest
 *
 *	This function examines error ring of @dev and determines
 *	whether NCQ needs to be turned off, transfer speed should be
 *	stepped down, or falling back to PIO is necessary.
 *
 *	ECAT_ATA_BUS	: ATA_BUS error for any command
 *
 *	ECAT_TOUT_HSM	: TIMEOUT for any command or HSM violation for
 *			  IO commands
 *
 *	ECAT_UNK_DEV	: Unknown DEV error for IO commands
 *
 *	ECAT_DUBIOUS_*	: Identical to above three but occurred while
 *			  data transfer hasn't been verified.
 *
 *	Verdicts are
 *
 *	NCQ_OFF		: Turn off NCQ.
 *
 *	SPEED_DOWN	: Speed down transfer speed but don't fall back
 *			  to PIO.
 *
 *	FALLBACK_TO_PIO	: Fall back to PIO.
 *
 *	Even if multiple verdicts are returned, only one action is
 *	taken per error.  An action triggered by non-DUBIOUS errors
 *	clears ering, while one triggered by DUBIOUS_* errors doesn't.
 *	This is to expedite speed down decisions right after device is
 *	initially configured.
 *
 *	The following are the speed down rules.  #1 and #2 deal with
 *	DUBIOUS errors.
 *
 *	1. If more than one DUBIOUS_ATA_BUS or DUBIOUS_TOUT_HSM errors
 *	   occurred during last 5 mins, SPEED_DOWN and FALLBACK_TO_PIO.
 *
 *	2. If more than one DUBIOUS_TOUT_HSM or DUBIOUS_UNK_DEV errors
 *	   occurred during last 5 mins, NCQ_OFF.
 *
 *	3. If more than 6 ATA_BUS, TOUT_HSM or UNK_DEV errors
 *	   occurred during last 5 mins, FALLBACK_TO_PIO.
 *
 *	4. If more than 3 TOUT_HSM or UNK_DEV errors occurred
 *	   during last 10 mins, NCQ_OFF.
 *
 *	5. If more than 3 ATA_BUS or TOUT_HSM errors, or more than 6
 *	   UNK_DEV errors occurred during last 10 mins, SPEED_DOWN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	OR of ATA_EH_SPDN_* flags.
 */
static unsigned int ata_eh_speed_down_verdict(struct ata_device *dev)
{
	const u64 j5mins = 5LLU * 60 * HZ, j10mins = 10LLU * 60 * HZ;
	u64 j64 = get_jiffies_64();
	struct speed_down_verdict_arg arg;
	unsigned int verdict = 0;

	/* scan past 5 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j5mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] > 1)
		verdict |= ATA_EH_SPDN_SPEED_DOWN |
			ATA_EH_SPDN_FALLBACK_TO_PIO | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_DUBIOUS_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_DUBIOUS_UNK_DEV] > 1)
		verdict |= ATA_EH_SPDN_NCQ_OFF | ATA_EH_SPDN_KEEP_ERRORS;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_FALLBACK_TO_PIO;

	/* scan past 10 mins of error history */
	memset(&arg, 0, sizeof(arg));
	arg.since = j64 - min(j64, j10mins);
	ata_ering_map(&dev->ering, speed_down_verdict_cb, &arg);

	if (arg.nr_errors[ATA_ECAT_TOUT_HSM] +
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 3)
		verdict |= ATA_EH_SPDN_NCQ_OFF;

	if (arg.nr_errors[ATA_ECAT_ATA_BUS] +
	    arg.nr_errors[ATA_ECAT_TOUT_HSM] > 3 ||
	    arg.nr_errors[ATA_ECAT_UNK_DEV] > 6)
		verdict |= ATA_EH_SPDN_SPEED_DOWN;

	return verdict;
}
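
/* Worked example (illustrative, not from the original source): two
 * ATA_ECAT_DUBIOUS_TOUT_HSM entries recorded within the last 5 minutes
 * trip both rule #1 and rule #2 above, so the verdict comes back as
 * SPEED_DOWN | FALLBACK_TO_PIO | NCQ_OFF with KEEP_ERRORS set, i.e. the
 * error ring is preserved for later decisions.
 */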

/**
 *	ata_eh_speed_down - record error and speed down if necessary
 *	@dev: Failed device
 *	@eflags: mask of ATA_EFLAG_* flags
 *	@err_mask: err_mask of the error
 *
 *	Record error and examine error history to determine whether
 *	adjusting transmission speed is necessary.  It also sets
 *	transmission limits appropriately if such adjustment is
 *	necessary.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Determined recovery action.
 */
static unsigned int ata_eh_speed_down(struct ata_device *dev,
				unsigned int eflags, unsigned int err_mask)
{
	struct ata_link *link = ata_dev_phys_link(dev);
	int xfer_ok = 0;
	unsigned int verdict;
	unsigned int action = 0;

	/* don't bother if Cat-0 error */
	if (ata_eh_categorize_error(eflags, err_mask, &xfer_ok) == 0)
		return 0;

	/* record error and determine whether speed down is necessary */
	ata_ering_record(&dev->ering, eflags, err_mask);
	verdict = ata_eh_speed_down_verdict(dev);

	/* turn off NCQ? */
	if ((verdict & ATA_EH_SPDN_NCQ_OFF) &&
	    (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ |
			   ATA_DFLAG_NCQ_OFF)) == ATA_DFLAG_NCQ) {
		dev->flags |= ATA_DFLAG_NCQ_OFF;
		ata_dev_printk(dev, KERN_WARNING,
			       "NCQ disabled due to excessive errors\n");
		goto done;
	}

	/* speed down? */
	if (verdict & ATA_EH_SPDN_SPEED_DOWN) {
		/* speed down SATA link speed if possible */
		if (sata_down_spd_limit(link, 0) == 0) {
			action |= ATA_EH_RESET;
			goto done;
		}

		/* lower transfer mode */
		if (dev->spdn_cnt < 2) {
			static const int dma_dnxfer_sel[] =
				{ ATA_DNXFER_DMA, ATA_DNXFER_40C };
			static const int pio_dnxfer_sel[] =
				{ ATA_DNXFER_PIO, ATA_DNXFER_FORCE_PIO0 };
			int sel;

			if (dev->xfer_shift != ATA_SHIFT_PIO)
				sel = dma_dnxfer_sel[dev->spdn_cnt];
			else
				sel = pio_dnxfer_sel[dev->spdn_cnt];

			dev->spdn_cnt++;

			if (ata_down_xfermask_limit(dev, sel) == 0) {
				action |= ATA_EH_RESET;
				goto done;
			}
		}
	}

	/* Fall back to PIO?  Slowing down to PIO is meaningless for
	 * SATA ATA devices.  Consider it only for PATA and SATAPI.
	 */
	if ((verdict & ATA_EH_SPDN_FALLBACK_TO_PIO) && (dev->spdn_cnt >= 2) &&
	    (link->ap->cbl != ATA_CBL_SATA || dev->class == ATA_DEV_ATAPI) &&
	    (dev->xfer_shift != ATA_SHIFT_PIO)) {
		if (ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO) == 0) {
			dev->spdn_cnt = 0;
			action |= ATA_EH_RESET;
			goto done;
		}
	}

	return 0;
 done:
	/* device has been slowed down, blow error history */
	if (!(verdict & ATA_EH_SPDN_KEEP_ERRORS))
		ata_ering_clear(&dev->ering);
	return action;
}
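
/* Escalation ladder (illustrative summary, not from the original
 * source): a SPEED_DOWN verdict first tries lowering the SATA link
 * speed; only if that fails does it step the transfer mode down,
 * bumping spdn_cnt.  FALLBACK_TO_PIO is honored only once spdn_cnt has
 * reached 2, and then only for PATA devices and ATAPI, since PIO gains
 * nothing for SATA ATA disks.
 */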

/**
 *	ata_eh_link_autopsy - analyze error and determine recovery action
 *	@link: host link to perform autopsy on
 *
 *	Analyze why @link failed and determine which recovery actions
 *	are needed.  This function also sets more detailed AC_ERR_*
 *	values and fills sense data for ATAPI CHECK SENSE.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_eh_link_autopsy(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	struct ata_device *dev;
	unsigned int all_err_mask = 0, eflags = 0;
	int tag;
	u32 serror;
	int rc;

	DPRINTK("ENTER\n");

	if (ehc->i.flags & ATA_EHI_NO_AUTOPSY)
		return;

	/* obtain and analyze SError */
	rc = sata_scr_read(link, SCR_ERROR, &serror);
	if (rc == 0) {
		ehc->i.serror |= serror;
		ata_eh_analyze_serror(link);
	} else if (rc != -EOPNOTSUPP) {
		/* SError read failed, force reset and probing */
		ehc->i.probe_mask |= ATA_ALL_DEVICES;
		ehc->i.action |= ATA_EH_RESET;
		ehc->i.err_mask |= AC_ERR_OTHER;
	}

	/* analyze NCQ failure */
	ata_eh_analyze_ncq_error(link);

	/* any real error trumps AC_ERR_OTHER */
	if (ehc->i.err_mask & ~AC_ERR_OTHER)
		ehc->i.err_mask &= ~AC_ERR_OTHER;

	all_err_mask |= ehc->i.err_mask;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link)
			continue;

		/* inherit upper level err_mask */
		qc->err_mask |= ehc->i.err_mask;

		/* analyze TF */
		ehc->i.action |= ata_eh_analyze_tf(qc, &qc->result_tf);

		/* DEV errors are probably spurious in case of ATA_BUS error */
		if (qc->err_mask & AC_ERR_ATA_BUS)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_MEDIA |
					  AC_ERR_INVALID);

		/* any real error trumps unknown error */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;

		/* SENSE_VALID trumps dev/unknown error and revalidation */
		if (qc->flags & ATA_QCFLAG_SENSE_VALID)
			qc->err_mask &= ~(AC_ERR_DEV | AC_ERR_OTHER);

		/* determine whether the command is worth retrying */
		if (qc->flags & ATA_QCFLAG_IO ||
		    (!(qc->err_mask & AC_ERR_INVALID) &&
		     qc->err_mask != AC_ERR_DEV))
			qc->flags |= ATA_QCFLAG_RETRY;

		/* accumulate error info */
		ehc->i.dev = qc->dev;
		all_err_mask |= qc->err_mask;
		if (qc->flags & ATA_QCFLAG_IO)
			eflags |= ATA_EFLAG_IS_IO;
	}

	/* enforce default EH actions */
	if (ap->pflags & ATA_PFLAG_FROZEN ||
	    all_err_mask & (AC_ERR_HSM | AC_ERR_TIMEOUT))
		ehc->i.action |= ATA_EH_RESET;
	else if (((eflags & ATA_EFLAG_IS_IO) && all_err_mask) ||
		 (!(eflags & ATA_EFLAG_IS_IO) && (all_err_mask & ~AC_ERR_DEV)))
		ehc->i.action |= ATA_EH_REVALIDATE;

	/* If we have offending qcs and the associated failed device,
	 * perform per-dev EH action only on the offending device.
	 */
	if (ehc->i.dev) {
		ehc->i.dev_action[ehc->i.dev->devno] |=
			ehc->i.action & ATA_EH_PERDEV_MASK;
		ehc->i.action &= ~ATA_EH_PERDEV_MASK;
	}

	/* propagate timeout to host link */
	if ((all_err_mask & AC_ERR_TIMEOUT) && !ata_is_host_link(link))
		ap->link.eh_context.i.err_mask |= AC_ERR_TIMEOUT;

	/* record error and consider speeding down */
	dev = ehc->i.dev;
	if (!dev && ((ata_link_max_devices(link) == 1 &&
		      ata_dev_enabled(link->device))))
		dev = link->device;

	if (dev) {
		if (dev->flags & ATA_DFLAG_DUBIOUS_XFER)
			eflags |= ATA_EFLAG_DUBIOUS_XFER;
		ehc->i.action |= ata_eh_speed_down(dev, eflags, all_err_mask);
	}

	DPRINTK("EXIT\n");
}

/**
 *	ata_eh_autopsy - analyze error and determine recovery action
 *	@ap: host port to perform autopsy on
 *
 *	Analyze all links of @ap and determine why they failed and
 *	which recovery actions are needed.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_eh_autopsy(struct ata_port *ap)
{
	struct ata_link *link;

	ata_for_each_link(link, ap, EDGE)
		ata_eh_link_autopsy(link);

	/* Handle the slave link.  Autopsy is done similarly
	 * but actions and flags are transferred over to the master
	 * link and handled from there.
	 */
	if (ap->slave_link) {
		struct ata_eh_context *mehc = &ap->link.eh_context;
		struct ata_eh_context *sehc = &ap->slave_link->eh_context;

		/* transfer control flags from master to slave */
		sehc->i.flags |= mehc->i.flags & ATA_EHI_TO_SLAVE_MASK;

		/* perform autopsy on the slave link */
		ata_eh_link_autopsy(ap->slave_link);

		/* transfer actions from slave to master and clear slave */
		ata_eh_about_to_do(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
		mehc->i.action		|= sehc->i.action;
		mehc->i.dev_action[1]	|= sehc->i.dev_action[1];
		mehc->i.flags		|= sehc->i.flags;
		ata_eh_done(ap->slave_link, NULL, ATA_EH_ALL_ACTIONS);
	}

	/* Autopsy of fanout ports can affect host link autopsy.
	 * Perform host link autopsy last.
	 */
	if (sata_pmp_attached(ap))
		ata_eh_link_autopsy(&ap->link);
}

/**
 *	ata_get_cmd_descript - get description for ATA command
 *	@command: ATA command code to get description for
 *
 *	Return a textual description of the given command, or NULL if the
 *	command is not known.
 *
 *	LOCKING:
 *	None
 */
const char *ata_get_cmd_descript(u8 command)
{
#ifdef CONFIG_ATA_VERBOSE_ERROR
	static const struct
	{
		u8 command;
		const char *text;
	} cmd_descr[] = {
		{ ATA_CMD_DEV_RESET,		"DEVICE RESET" },
		{ ATA_CMD_CHK_POWER,		"CHECK POWER MODE" },
		{ ATA_CMD_STANDBY,		"STANDBY" },
		{ ATA_CMD_IDLE,			"IDLE" },
		{ ATA_CMD_EDD,			"EXECUTE DEVICE DIAGNOSTIC" },
		{ ATA_CMD_DOWNLOAD_MICRO,	"DOWNLOAD MICROCODE" },
		{ ATA_CMD_NOP,			"NOP" },
		{ ATA_CMD_FLUSH,		"FLUSH CACHE" },
		{ ATA_CMD_FLUSH_EXT,		"FLUSH CACHE EXT" },
		{ ATA_CMD_ID_ATA,		"IDENTIFY DEVICE" },
		{ ATA_CMD_ID_ATAPI,		"IDENTIFY PACKET DEVICE" },
		{ ATA_CMD_SERVICE,		"SERVICE" },
		{ ATA_CMD_READ,			"READ DMA" },
		{ ATA_CMD_READ_EXT,		"READ DMA EXT" },
		{ ATA_CMD_READ_QUEUED,		"READ DMA QUEUED" },
		{ ATA_CMD_READ_STREAM_EXT,	"READ STREAM EXT" },
		{ ATA_CMD_READ_STREAM_DMA_EXT,	"READ STREAM DMA EXT" },
		{ ATA_CMD_WRITE,		"WRITE DMA" },
		{ ATA_CMD_WRITE_EXT,		"WRITE DMA EXT" },
		{ ATA_CMD_WRITE_QUEUED,		"WRITE DMA QUEUED" },
		{ ATA_CMD_WRITE_STREAM_EXT,	"WRITE STREAM EXT" },
		{ ATA_CMD_WRITE_STREAM_DMA_EXT,	"WRITE STREAM DMA EXT" },
		{ ATA_CMD_WRITE_FUA_EXT,	"WRITE DMA FUA EXT" },
		{ ATA_CMD_WRITE_QUEUED_FUA_EXT,	"WRITE DMA QUEUED FUA EXT" },
		{ ATA_CMD_FPDMA_READ,		"READ FPDMA QUEUED" },
		{ ATA_CMD_FPDMA_WRITE,		"WRITE FPDMA QUEUED" },
		{ ATA_CMD_PIO_READ,		"READ SECTOR(S)" },
		{ ATA_CMD_PIO_READ_EXT,		"READ SECTOR(S) EXT" },
		{ ATA_CMD_PIO_WRITE,		"WRITE SECTOR(S)" },
		{ ATA_CMD_PIO_WRITE_EXT,	"WRITE SECTOR(S) EXT" },
		{ ATA_CMD_READ_MULTI,		"READ MULTIPLE" },
		{ ATA_CMD_READ_MULTI_EXT,	"READ MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI,		"WRITE MULTIPLE" },
		{ ATA_CMD_WRITE_MULTI_EXT,	"WRITE MULTIPLE EXT" },
		{ ATA_CMD_WRITE_MULTI_FUA_EXT,	"WRITE MULTIPLE FUA EXT" },
		{ ATA_CMD_SET_FEATURES,		"SET FEATURES" },
		{ ATA_CMD_SET_MULTI,		"SET MULTIPLE MODE" },
		{ ATA_CMD_VERIFY,		"READ VERIFY SECTOR(S)" },
		{ ATA_CMD_VERIFY_EXT,		"READ VERIFY SECTOR(S) EXT" },
		{ ATA_CMD_WRITE_UNCORR_EXT,	"WRITE UNCORRECTABLE EXT" },
		{ ATA_CMD_STANDBYNOW1,		"STANDBY IMMEDIATE" },
		{ ATA_CMD_IDLEIMMEDIATE,	"IDLE IMMEDIATE" },
		{ ATA_CMD_SLEEP,		"SLEEP" },
		{ ATA_CMD_INIT_DEV_PARAMS,	"INITIALIZE DEVICE PARAMETERS" },
		{ ATA_CMD_READ_NATIVE_MAX,	"READ NATIVE MAX ADDRESS" },
		{ ATA_CMD_READ_NATIVE_MAX_EXT,	"READ NATIVE MAX ADDRESS EXT" },
		{ ATA_CMD_SET_MAX,		"SET MAX ADDRESS" },
		{ ATA_CMD_SET_MAX_EXT,		"SET MAX ADDRESS EXT" },
		{ ATA_CMD_READ_LOG_EXT,		"READ LOG EXT" },
		{ ATA_CMD_WRITE_LOG_EXT,	"WRITE LOG EXT" },
		{ ATA_CMD_READ_LOG_DMA_EXT,	"READ LOG DMA EXT" },
		{ ATA_CMD_WRITE_LOG_DMA_EXT,	"WRITE LOG DMA EXT" },
		{ ATA_CMD_TRUSTED_RCV,		"TRUSTED RECEIVE" },
		{ ATA_CMD_TRUSTED_RCV_DMA,	"TRUSTED RECEIVE DMA" },
		{ ATA_CMD_TRUSTED_SND,		"TRUSTED SEND" },
		{ ATA_CMD_TRUSTED_SND_DMA,	"TRUSTED SEND DMA" },
		{ ATA_CMD_PMP_READ,		"READ BUFFER" },
		{ ATA_CMD_PMP_WRITE,		"WRITE BUFFER" },
		{ ATA_CMD_CONF_OVERLAY,		"DEVICE CONFIGURATION OVERLAY" },
		{ ATA_CMD_SEC_SET_PASS,		"SECURITY SET PASSWORD" },
		{ ATA_CMD_SEC_UNLOCK,		"SECURITY UNLOCK" },
		{ ATA_CMD_SEC_ERASE_PREP,	"SECURITY ERASE PREPARE" },
		{ ATA_CMD_SEC_ERASE_UNIT,	"SECURITY ERASE UNIT" },
		{ ATA_CMD_SEC_FREEZE_LOCK,	"SECURITY FREEZE LOCK" },
		{ ATA_CMD_SEC_DISABLE_PASS,	"SECURITY DISABLE PASSWORD" },
		{ ATA_CMD_CONFIG_STREAM,	"CONFIGURE STREAM" },
		{ ATA_CMD_SMART,		"SMART" },
		{ ATA_CMD_MEDIA_LOCK,		"DOOR LOCK" },
		{ ATA_CMD_MEDIA_UNLOCK,		"DOOR UNLOCK" },
		{ ATA_CMD_CHK_MED_CRD_TYP,	"CHECK MEDIA CARD TYPE" },
		{ ATA_CMD_CFA_REQ_EXT_ERR,	"CFA REQUEST EXTENDED ERROR" },
		{ ATA_CMD_CFA_WRITE_NE,		"CFA WRITE SECTORS WITHOUT ERASE" },
		{ ATA_CMD_CFA_TRANS_SECT,	"CFA TRANSLATE SECTOR" },
		{ ATA_CMD_CFA_ERASE,		"CFA ERASE SECTORS" },
		{ ATA_CMD_CFA_WRITE_MULT_NE,	"CFA WRITE MULTIPLE WITHOUT ERASE" },
		{ ATA_CMD_READ_LONG,		"READ LONG (with retries)" },
		{ ATA_CMD_READ_LONG_ONCE,	"READ LONG (without retries)" },
		{ ATA_CMD_WRITE_LONG,		"WRITE LONG (with retries)" },
		{ ATA_CMD_WRITE_LONG_ONCE,	"WRITE LONG (without retries)" },
		{ ATA_CMD_RESTORE,		"RECALIBRATE" },
		{ 0,				NULL } /* terminate list */
	};

	unsigned int i;
	for (i = 0; cmd_descr[i].text; i++)
		if (cmd_descr[i].command == command)
			return cmd_descr[i].text;
#endif

	return NULL;
}

/**
 *	ata_eh_link_report - report error handling to user
 *	@link: ATA link EH is going on
 *
 *	Report EH to user.
 *
 *	LOCKING:
 *	None.
 */
static void ata_eh_link_report(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const char *frozen, *desc;
	char tries_buf[6];
	int tag, nr_failed = 0;

	if (ehc->i.flags & ATA_EHI_QUIET)
		return;

	desc = NULL;
	if (ehc->i.desc[0] != '\0')
		desc = ehc->i.desc;

	for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
		struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);

		if (!(qc->flags & ATA_QCFLAG_FAILED) ||
		    ata_dev_phys_link(qc->dev) != link ||
		    ((qc->flags & ATA_QCFLAG_QUIET) &&
		     qc->err_mask == AC_ERR_DEV))
			continue;
		if (qc->flags & ATA_QCFLAG_SENSE_VALID && !qc->err_mask)
			continue;

		nr_failed++;
	}

	if (!nr_failed && !ehc->i.err_mask)
		return;

	frozen = "";
	if (ap->pflags & ATA_PFLAG_FROZEN)
		frozen = " frozen";

	memset(tries_buf, 0, sizeof(tries_buf));
	if (ap->eh_tries < ATA_EH_MAX_TRIES)
		snprintf(tries_buf, sizeof(tries_buf) - 1, " t%d",
			 ap->eh_tries);

	if (ehc->i.dev) {
		ata_dev_printk(ehc->i.dev, KERN_ERR, "exception Emask 0x%x "
			       "SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
			       ehc->i.err_mask, link->sactive, ehc->i.serror,
			       ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_dev_printk(ehc->i.dev, KERN_ERR, "%s\n", desc);
	} else {
		ata_link_printk(link, KERN_ERR, "exception Emask 0x%x "
				"SAct 0x%x SErr 0x%x action 0x%x%s%s\n",
				ehc->i.err_mask, link->sactive, ehc->i.serror,
				ehc->i.action, frozen, tries_buf);
		if (desc)
			ata_link_printk(link, KERN_ERR, "%s\n", desc);
	}
2305 #ifdef CONFIG_ATA_VERBOSE_ERROR
2307 ata_link_printk(link, KERN_ERR,
2308 "SError: { %s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s}\n",
2309 ehc->i.serror & SERR_DATA_RECOVERED ? "RecovData " : "",
2310 ehc->i.serror & SERR_COMM_RECOVERED ? "RecovComm " : "",
2311 ehc->i.serror & SERR_DATA ? "UnrecovData " : "",
2312 ehc->i.serror & SERR_PERSISTENT ? "Persist " : "",
2313 ehc->i.serror & SERR_PROTOCOL ? "Proto " : "",
2314 ehc->i.serror & SERR_INTERNAL ? "HostInt " : "",
2315 ehc->i.serror & SERR_PHYRDY_CHG ? "PHYRdyChg " : "",
2316 ehc->i.serror & SERR_PHY_INT_ERR ? "PHYInt " : "",
2317 ehc->i.serror & SERR_COMM_WAKE ? "CommWake " : "",
2318 ehc->i.serror & SERR_10B_8B_ERR ? "10B8B " : "",
2319 ehc->i.serror & SERR_DISPARITY ? "Dispar " : "",
2320 ehc->i.serror & SERR_CRC ? "BadCRC " : "",
2321 ehc->i.serror & SERR_HANDSHAKE ? "Handshk " : "",
2322 ehc->i.serror & SERR_LINK_SEQ_ERR ? "LinkSeq " : "",
2323 ehc->i.serror & SERR_TRANS_ST_ERROR ? "TrStaTrns " : "",
2324 ehc->i.serror & SERR_UNRECOG_FIS ? "UnrecFIS " : "",
2325 ehc->i.serror & SERR_DEV_XCHG ? "DevExch " : "");
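/* Each SERR_* bit decodes to one short token. A link with
 * signal-integrity problems might log, for example,
 * "SError: { UnrecovData 10B8B Dispar BadCRC Handshk }"
 * (illustrative combination, not taken from a real trace).
 */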
2328 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
2329 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
2330 struct ata_taskfile *cmd = &qc->tf, *res = &qc->result_tf;
2331 const u8 *cdb = qc->cdb;
2332 char data_buf[20] = "";
2333 char cdb_buf[70] = "";
2335 if (!(qc->flags & ATA_QCFLAG_FAILED) ||
2336 ata_dev_phys_link(qc->dev) != link || !qc->err_mask)
2339 if (qc->dma_dir != DMA_NONE) {
2340 static const char *dma_str[] = {
2341 [DMA_BIDIRECTIONAL] = "bidi",
2342 [DMA_TO_DEVICE] = "out",
2343 [DMA_FROM_DEVICE] = "in",
2345 static const char *prot_str[] = {
2346 [ATA_PROT_PIO] = "pio",
2347 [ATA_PROT_DMA] = "dma",
2348 [ATA_PROT_NCQ] = "ncq",
2349 [ATAPI_PROT_PIO] = "pio",
2350 [ATAPI_PROT_DMA] = "dma",
2353 snprintf(data_buf, sizeof(data_buf), " %s %u %s",
2354 prot_str[qc->tf.protocol], qc->nbytes,
2355 dma_str[qc->dma_dir]);
2358 if (ata_is_atapi(qc->tf.protocol)) {
2360 scsi_print_command(qc->scsicmd);
2362 snprintf(cdb_buf, sizeof(cdb_buf),
2363 "cdb %02x %02x %02x %02x %02x %02x %02x %02x "
2364 "%02x %02x %02x %02x %02x %02x %02x %02x\n ",
2365 cdb[0], cdb[1], cdb[2], cdb[3],
2366 cdb[4], cdb[5], cdb[6], cdb[7],
2367 cdb[8], cdb[9], cdb[10], cdb[11],
2368 cdb[12], cdb[13], cdb[14], cdb[15]);
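/* For ATAPI commands the CDB is reported either through
 * scsi_print_command() when a SCSI command is attached (that branch
 * is elided above) or as the raw 16-byte hexdump built here.
 */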
2370 const char *descr = ata_get_cmd_descript(cmd->command);
2372 ata_dev_printk(qc->dev, KERN_ERR,
2373 "failed command: %s\n", descr);
2376 ata_dev_printk(qc->dev, KERN_ERR,
2377 "cmd %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2379 "res %02x/%02x:%02x:%02x:%02x:%02x/%02x:%02x:%02x:%02x:%02x/%02x "
2380 "Emask 0x%x (%s)%s\n",
2381 cmd->command, cmd->feature, cmd->nsect,
2382 cmd->lbal, cmd->lbam, cmd->lbah,
2383 cmd->hob_feature, cmd->hob_nsect,
2384 cmd->hob_lbal, cmd->hob_lbam, cmd->hob_lbah,
2385 cmd->device, qc->tag, data_buf, cdb_buf,
2386 res->command, res->feature, res->nsect,
2387 res->lbal, res->lbam, res->lbah,
2388 res->hob_feature, res->hob_nsect,
2389 res->hob_lbal, res->hob_lbam, res->hob_lbah,
2390 res->device, qc->err_mask, ata_err_string(qc->err_mask),
2391 qc->err_mask & AC_ERR_NCQ ? " <F>" : "");
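/* The cmd/res lines mirror the taskfile layout:
 * command/feature:nsect:lbal:lbam:lbah/hob_feature:hob_nsect:
 * hob_lbal:hob_lbam:hob_lbah/device. A made-up media-error result
 * would read:
 *	res 51/40:00:21:04:00/00:00:00:00:00/e0 Emask 0x9 (media error)
 * where status 0x51 = DRDY|DSC|ERR and error 0x40 = UNC.
 */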
2393 #ifdef CONFIG_ATA_VERBOSE_ERROR
2394 if (res->command & (ATA_BUSY | ATA_DRDY | ATA_DF | ATA_DRQ |
2396 if (res->command & ATA_BUSY)
2397 ata_dev_printk(qc->dev, KERN_ERR,
2398 "status: { Busy }\n");
2400 ata_dev_printk(qc->dev, KERN_ERR,
2401 "status: { %s%s%s%s}\n",
2402 res->command & ATA_DRDY ? "DRDY " : "",
2403 res->command & ATA_DF ? "DF " : "",
2404 res->command & ATA_DRQ ? "DRQ " : "",
2405 res->command & ATA_ERR ? "ERR " : "");
2408 if (cmd->command != ATA_CMD_PACKET &&
2409 (res->feature & (ATA_ICRC | ATA_UNC | ATA_IDNF |
2411 ata_dev_printk(qc->dev, KERN_ERR,
2412 "error: { %s%s%s%s}\n",
2413 res->feature & ATA_ICRC ? "ICRC " : "",
2414 res->feature & ATA_UNC ? "UNC " : "",
2415 res->feature & ATA_IDNF ? "IDNF " : "",
2416 res->feature & ATA_ABORTED ? "ABRT " : "");
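/* Error-register bit meanings: ICRC = interface CRC error on the
 * bus, UNC = uncorrectable media error, IDNF = sector ID not found,
 * ABRT = command aborted by the device.
 */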
2422 * ata_eh_report - report error handling to user
2423 * @ap: ATA port to report EH about
2425 * Report EH to user.
2430 void ata_eh_report(struct ata_port *ap)
2432 struct ata_link *link;
2434 ata_for_each_link(link, ap, HOST_FIRST)
2435 ata_eh_link_report(link);
2438 static int ata_do_reset(struct ata_link *link, ata_reset_fn_t reset,
2439 unsigned int *classes, unsigned long deadline,
2442 struct ata_device *dev;
2445 ata_for_each_dev(dev, link, ALL)
2446 classes[dev->devno] = ATA_DEV_UNKNOWN;
2448 return reset(link, classes, deadline);
2451 static int ata_eh_followup_srst_needed(struct ata_link *link,
2452 int rc, const unsigned int *classes)
2454 if ((link->flags & ATA_LFLAG_NO_SRST) || ata_link_offline(link))
2458 if (sata_pmp_supported(link->ap) && ata_is_host_link(link))
2463 int ata_eh_reset(struct ata_link *link, int classify,
2464 ata_prereset_fn_t prereset, ata_reset_fn_t softreset,
2465 ata_reset_fn_t hardreset, ata_postreset_fn_t postreset)
2467 struct ata_port *ap = link->ap;
2468 struct ata_link *slave = ap->slave_link;
2469 struct ata_eh_context *ehc = &link->eh_context;
2470 struct ata_eh_context *sehc = slave ? &slave->eh_context : NULL;
2471 unsigned int *classes = ehc->classes;
2472 unsigned int lflags = link->flags;
2473 int verbose = !(ehc->i.flags & ATA_EHI_QUIET);
2474 int max_tries = 0, try = 0;
2475 struct ata_link *failed_link;
2476 struct ata_device *dev;
2477 unsigned long deadline, now;
2478 ata_reset_fn_t reset;
2479 unsigned long flags;
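/* The loop below counts entries in ata_eh_reset_timeouts[] up to its
 * ULONG_MAX sentinel to derive max_tries; each retry later indexes
 * the same table for its per-try deadline.
 */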
2486 while (ata_eh_reset_timeouts[max_tries] != ULONG_MAX)
2488 if (link->flags & ATA_LFLAG_NO_HRST)
2490 if (link->flags & ATA_LFLAG_NO_SRST)
2493 /* make sure each reset attempt is at least COOL_DOWN apart */
2494 if (ehc->i.flags & ATA_EHI_DID_RESET) {
2496 WARN_ON(time_after(ehc->last_reset, now));
2497 deadline = ata_deadline(ehc->last_reset,
2498 ATA_EH_RESET_COOL_DOWN);
2499 if (time_before(now, deadline))
2500 schedule_timeout_uninterruptible(deadline - now);
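/* Cool-down math, for illustration: if the previous reset finished
 * at jiffies time T, the next attempt is pushed out to roughly
 * T + ATA_EH_RESET_COOL_DOWN msecs and the EH thread sleeps for the
 * remainder.
 */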
2503 spin_lock_irqsave(ap->lock, flags);
2504 ap->pflags |= ATA_PFLAG_RESETTING;
2505 spin_unlock_irqrestore(ap->lock, flags);
2507 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2509 ata_for_each_dev(dev, link, ALL) {
2510 /* If we issue an SRST then an ATA drive (not ATAPI)
2511 * may change configuration and be in PIO0 timing. If
2512 * we do a hard reset (or are coming from power on)
2513 * this is true for ATA or ATAPI. Until we've set a
2514 * suitable controller mode we should not touch the
2515 * bus as we may be talking too fast.
2517 dev->pio_mode = XFER_PIO_0;
2519 /* If the controller has a pio mode setup function
2520 * then use it to set the chipset to rights. Don't
2521 * touch the DMA setup as that will be dealt with when
2522 * configuring devices.
2524 if (ap->ops->set_piomode)
2525 ap->ops->set_piomode(ap, dev);
2528 /* prefer hardreset */
2530 ehc->i.action &= ~ATA_EH_RESET;
2533 ehc->i.action |= ATA_EH_HARDRESET;
2534 } else if (softreset) {
2536 ehc->i.action |= ATA_EH_SOFTRESET;
2540 unsigned long deadline = ata_deadline(jiffies,
2541 ATA_EH_PRERESET_TIMEOUT);
2544 sehc->i.action &= ~ATA_EH_RESET;
2545 sehc->i.action |= ehc->i.action;
2548 rc = prereset(link, deadline);
2550 /* If present, do prereset on slave link too. Reset
2551 * is skipped iff both master and slave links report
2552 * -ENOENT or clear ATA_EH_RESET.
2554 if (slave && (rc == 0 || rc == -ENOENT)) {
2557 tmp = prereset(slave, deadline);
2561 ehc->i.action |= sehc->i.action;
2565 if (rc == -ENOENT) {
2566 ata_link_printk(link, KERN_DEBUG,
2567 "port disabled. ignoring.\n");
2568 ehc->i.action &= ~ATA_EH_RESET;
2570 ata_for_each_dev(dev, link, ALL)
2571 classes[dev->devno] = ATA_DEV_NONE;
2575 ata_link_printk(link, KERN_ERR,
2576 "prereset failed (errno=%d)\n", rc);
2580 /* prereset() might have cleared ATA_EH_RESET. If so,
2581 * bang classes, thaw and return.
2583 if (reset && !(ehc->i.action & ATA_EH_RESET)) {
2584 ata_for_each_dev(dev, link, ALL)
2585 classes[dev->devno] = ATA_DEV_NONE;
2586 if ((ap->pflags & ATA_PFLAG_FROZEN) &&
2587 ata_is_host_link(link))
2588 ata_eh_thaw_port(ap);
2598 if (ata_is_host_link(link))
2599 ata_eh_freeze_port(ap);
2601 deadline = ata_deadline(jiffies, ata_eh_reset_timeouts[try++]);
2605 ata_link_printk(link, KERN_INFO, "%s resetting link\n",
2606 reset == softreset ? "soft" : "hard");
2608 /* mark that this EH session started with reset */
2609 ehc->last_reset = jiffies;
2610 if (reset == hardreset)
2611 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
2613 ehc->i.flags |= ATA_EHI_DID_SOFTRESET;
2615 rc = ata_do_reset(link, reset, classes, deadline, true);
2616 if (rc && rc != -EAGAIN) {
2621 /* hardreset slave link if existent */
2622 if (slave && reset == hardreset) {
2626 ata_link_printk(slave, KERN_INFO,
2627 "hard resetting link\n");
2629 ata_eh_about_to_do(slave, NULL, ATA_EH_RESET);
2630 tmp = ata_do_reset(slave, reset, classes, deadline,
2638 failed_link = slave;
2644 /* perform follow-up SRST if necessary */
2645 if (reset == hardreset &&
2646 ata_eh_followup_srst_needed(link, rc, classes)) {
2650 ata_link_printk(link, KERN_ERR,
2651 "follow-up softreset required "
2652 "but no softreset avaliable\n");
2658 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
2659 rc = ata_do_reset(link, reset, classes, deadline, true);
2667 ata_link_printk(link, KERN_INFO, "no reset method "
2668 "available, skipping reset\n");
2669 if (!(lflags & ATA_LFLAG_ASSUME_CLASS))
2670 lflags |= ATA_LFLAG_ASSUME_ATA;
2674 * Post-reset processing
2676 ata_for_each_dev(dev, link, ALL) {
2677 /* After the reset, the device state is PIO 0 and the
2678 * controller state is undefined. Reset also wakes up
2679 * drives from sleeping mode.
2681 dev->pio_mode = XFER_PIO_0;
2682 dev->flags &= ~ATA_DFLAG_SLEEPING;
2684 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
2687 /* apply class override */
2688 if (lflags & ATA_LFLAG_ASSUME_ATA)
2689 classes[dev->devno] = ATA_DEV_ATA;
2690 else if (lflags & ATA_LFLAG_ASSUME_SEMB)
2691 classes[dev->devno] = ATA_DEV_SEMB_UNSUP;
2694 /* record current link speed */
2695 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0)
2696 link->sata_spd = (sstatus >> 4) & 0xf;
2697 if (slave && sata_scr_read(slave, SCR_STATUS, &sstatus) == 0)
2698 slave->sata_spd = (sstatus >> 4) & 0xf;
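/* SStatus bits 7:4 (SPD) encode the negotiated link rate:
 * 1 = 1.5 Gbps, 2 = 3.0 Gbps, 3 = 6.0 Gbps.
 */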
2701 if (ata_is_host_link(link))
2702 ata_eh_thaw_port(ap);
2704 /* postreset() should clear hardware SError. Although SError
2705 * is cleared during link resume, clearing SError here is
2706 * necessary as some PHYs raise hotplug events after SRST.
2707 * This introduces a race condition where hotplug occurs between
2708 * reset and here. This race is mitigated by cross-checking
2709 * link onlineness and classification result later.
2712 postreset(link, classes);
2714 postreset(slave, classes);
2718 * Some controllers can't be frozen very well and may set
2719 * spurious error conditions during reset. Clear accumulated
2720 * error information. As reset is the final recovery action,
2721 * nothing is lost by doing this.
2723 spin_lock_irqsave(link->ap->lock, flags);
2724 memset(&link->eh_info, 0, sizeof(link->eh_info));
2726 memset(&slave->eh_info, 0, sizeof(slave->eh_info));
2727 ap->pflags &= ~ATA_PFLAG_EH_PENDING;
2728 spin_unlock_irqrestore(link->ap->lock, flags);
2731 * Make sure onlineness and classification result correspond.
2732 * Hotplug could have happened during reset and some
2733 * controllers fail to wait while a drive is spinning up after
2734 * being hotplugged, causing misdetection. By cross-checking
2735 * link on/offlineness and classification result, those
2736 * conditions can be reliably detected and retried.
2739 ata_for_each_dev(dev, link, ALL) {
2740 if (ata_phys_link_online(ata_dev_phys_link(dev))) {
2741 if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2742 ata_dev_printk(dev, KERN_DEBUG, "link online "
2743 "but device misclassifed\n");
2744 classes[dev->devno] = ATA_DEV_NONE;
2747 } else if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2748 if (ata_class_enabled(classes[dev->devno]))
2749 ata_dev_printk(dev, KERN_DEBUG, "link offline, "
2750 "clearing class %d to NONE\n",
2751 classes[dev->devno]);
2752 classes[dev->devno] = ATA_DEV_NONE;
2753 } else if (classes[dev->devno] == ATA_DEV_UNKNOWN) {
2754 ata_dev_printk(dev, KERN_DEBUG, "link status unknown, "
2755 "clearing UNKNOWN to NONE\n");
2756 classes[dev->devno] = ATA_DEV_NONE;
2760 if (classify && nr_unknown) {
2761 if (try < max_tries) {
2762 ata_link_printk(link, KERN_WARNING, "link online but "
2763 "%d devices misclassified, retrying\n",
2769 ata_link_printk(link, KERN_WARNING,
2770 "link online but %d devices misclassified, "
2771 "device detection might fail\n", nr_unknown);
2774 /* reset successful, schedule revalidation */
2775 ata_eh_done(link, NULL, ATA_EH_RESET);
2777 ata_eh_done(slave, NULL, ATA_EH_RESET);
2778 ehc->last_reset = jiffies; /* update to completion time */
2779 ehc->i.action |= ATA_EH_REVALIDATE;
2783 /* clear hotplug flag */
2784 ehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2786 sehc->i.flags &= ~ATA_EHI_HOTPLUGGED;
2788 spin_lock_irqsave(ap->lock, flags);
2789 ap->pflags &= ~ATA_PFLAG_RESETTING;
2790 spin_unlock_irqrestore(ap->lock, flags);
2795 /* if SCR isn't accessible on a fan-out port, PMP needs to be reset */
2796 if (!ata_is_host_link(link) &&
2797 sata_scr_read(link, SCR_STATUS, &sstatus))
2800 if (rc == -ERESTART || try >= max_tries)
2804 if (time_before(now, deadline)) {
2805 unsigned long delta = deadline - now;
2807 ata_link_printk(failed_link, KERN_WARNING,
2808 "reset failed (errno=%d), retrying in %u secs\n",
2809 rc, DIV_ROUND_UP(jiffies_to_msecs(delta), 1000));
2812 delta = schedule_timeout_uninterruptible(delta);
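/* schedule_timeout_uninterruptible() returns the unexpired part of
 * the sleep; in the full source this call sits in a loop that
 * re-sleeps on the remainder so the whole back-off period elapses.
 */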
2815 if (try == max_tries - 1) {
2816 sata_down_spd_limit(link, 0);
2818 sata_down_spd_limit(slave, 0);
2819 } else if (rc == -EPIPE)
2820 sata_down_spd_limit(failed_link, 0);
2827 static inline void ata_eh_pull_park_action(struct ata_port *ap)
2829 struct ata_link *link;
2830 struct ata_device *dev;
2831 unsigned long flags;
2834 * This function can be thought of as an extended version of
2835 * ata_eh_about_to_do() specially crafted to accommodate the
2836 * requirements of ATA_EH_PARK handling. Since the EH thread
2837 * does not leave the do {} while () loop in ata_eh_recover as
2838 * long as the timeout for a park request to *one* device on
2839 * the port has not expired, and since we still want to pick
2840 * up park requests to other devices on the same port or
2841 * timeout updates for the same device, we have to pull
2842 * ATA_EH_PARK actions from eh_info into eh_context.i
2843 * ourselves at the beginning of each pass over the loop.
2845 * Additionally, all write accesses to &ap->park_req_pending
2846 * through INIT_COMPLETION() (see below) or complete_all()
2847 * (see ata_scsi_park_store()) are protected by the host lock.
2848 * As a result, park_req_pending.done is zero on
2849 * exit from this function, i.e. when ATA_EH_PARK actions for
2850 * *all* devices on port ap have been pulled into the
2851 * respective eh_context structs. If, and only if,
2852 * park_req_pending.done is non-zero by the time we reach
2853 * wait_for_completion_timeout(), another ATA_EH_PARK action
2854 * has been scheduled for at least one of the devices on port
2855 * ap and we have to cycle over the do {} while () loop in
2856 * ata_eh_recover() again.
2859 spin_lock_irqsave(ap->lock, flags);
2860 INIT_COMPLETION(ap->park_req_pending);
2861 ata_for_each_link(link, ap, EDGE) {
2862 ata_for_each_dev(dev, link, ALL) {
2863 struct ata_eh_info *ehi = &link->eh_info;
2865 link->eh_context.i.dev_action[dev->devno] |=
2866 ehi->dev_action[dev->devno] & ATA_EH_PARK;
2867 ata_eh_clear_action(link, dev, ehi, ATA_EH_PARK);
2870 spin_unlock_irqrestore(ap->lock, flags);
2873 static void ata_eh_park_issue_cmd(struct ata_device *dev, int park)
2875 struct ata_eh_context *ehc = &dev->link->eh_context;
2876 struct ata_taskfile tf;
2877 unsigned int err_mask;
2879 ata_tf_init(dev, &tf);
2881 ehc->unloaded_mask |= 1 << dev->devno;
2882 tf.command = ATA_CMD_IDLEIMMEDIATE;
2888 ehc->unloaded_mask &= ~(1 << dev->devno);
2889 tf.command = ATA_CMD_CHK_POWER;
2892 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
2893 tf.protocol |= ATA_PROT_NODATA;
2894 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
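/* The elided taskfile fields above presumably select the UNLOAD
 * FEATURE variant of IDLE IMMEDIATE; per the ATA spec a drive that
 * actually unloaded its heads returns 0xc4 in the LBA low register,
 * which is what the check below verifies.
 */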
2895 if (park && (err_mask || tf.lbal != 0xc4)) {
2896 ata_dev_printk(dev, KERN_ERR, "head unload failed!\n");
2897 ehc->unloaded_mask &= ~(1 << dev->devno);
2901 static int ata_eh_revalidate_and_attach(struct ata_link *link,
2902 struct ata_device **r_failed_dev)
2904 struct ata_port *ap = link->ap;
2905 struct ata_eh_context *ehc = &link->eh_context;
2906 struct ata_device *dev;
2907 unsigned int new_mask = 0;
2908 unsigned long flags;
2913 /* For PATA drive side cable detection to work, IDENTIFY must
2914 * be done backwards such that PDIAG- is released by the slave
2915 * device before the master device is identified.
2917 ata_for_each_dev(dev, link, ALL_REVERSE) {
2918 unsigned int action = ata_eh_dev_action(dev);
2919 unsigned int readid_flags = 0;
2921 if (ehc->i.flags & ATA_EHI_DID_RESET)
2922 readid_flags |= ATA_READID_POSTRESET;
2924 if ((action & ATA_EH_REVALIDATE) && ata_dev_enabled(dev)) {
2925 WARN_ON(dev->class == ATA_DEV_PMP);
2927 if (ata_phys_link_offline(ata_dev_phys_link(dev))) {
2932 ata_eh_about_to_do(link, dev, ATA_EH_REVALIDATE);
2933 rc = ata_dev_revalidate(dev, ehc->classes[dev->devno],
2938 ata_eh_done(link, dev, ATA_EH_REVALIDATE);
2940 /* Configuration may have changed, reconfigure
2943 ehc->i.flags |= ATA_EHI_SETMODE;
2945 /* schedule the scsi_rescan_device() here */
2946 queue_work(ata_aux_wq, &(ap->scsi_rescan_task));
2947 } else if (dev->class == ATA_DEV_UNKNOWN &&
2948 ehc->tries[dev->devno] &&
2949 ata_class_enabled(ehc->classes[dev->devno])) {
2950 /* Temporarily set dev->class, it will be
2951 * permanently set once all configurations are
2952 * complete. This is necessary because new
2953 * device configuration is done in two
2956 dev->class = ehc->classes[dev->devno];
2958 if (dev->class == ATA_DEV_PMP)
2959 rc = sata_pmp_attach(dev);
2961 rc = ata_dev_read_id(dev, &dev->class,
2962 readid_flags, dev->id);
2964 /* read_id might have changed class, store and reset */
2965 ehc->classes[dev->devno] = dev->class;
2966 dev->class = ATA_DEV_UNKNOWN;
2970 /* clear error info accumulated during probe */
2971 ata_ering_clear(&dev->ering);
2972 new_mask |= 1 << dev->devno;
2975 /* IDENTIFY was issued to non-existent
2976 * device. No need to reset. Just
2977 * thaw and ignore the device.
2979 ata_eh_thaw_port(ap);
2987 /* PDIAG- should have been released, ask cable type if post-reset */
2988 if ((ehc->i.flags & ATA_EHI_DID_RESET) && ata_is_host_link(link)) {
2989 if (ap->ops->cable_detect)
2990 ap->cbl = ap->ops->cable_detect(ap);
2994 /* Configure new devices forward such that user doesn't see
2995 * device detection messages backwards.
2997 ata_for_each_dev(dev, link, ALL) {
2998 if (!(new_mask & (1 << dev->devno)))
3001 dev->class = ehc->classes[dev->devno];
3003 if (dev->class == ATA_DEV_PMP)
3006 ehc->i.flags |= ATA_EHI_PRINTINFO;
3007 rc = ata_dev_configure(dev);
3008 ehc->i.flags &= ~ATA_EHI_PRINTINFO;
3010 dev->class = ATA_DEV_UNKNOWN;
3014 spin_lock_irqsave(ap->lock, flags);
3015 ap->pflags |= ATA_PFLAG_SCSI_HOTPLUG;
3016 spin_unlock_irqrestore(ap->lock, flags);
3018 /* new device discovered, configure xfermode */
3019 ehc->i.flags |= ATA_EHI_SETMODE;
3025 *r_failed_dev = dev;
3026 DPRINTK("EXIT rc=%d\n", rc);
3031 * ata_set_mode - Program timings and issue SET FEATURES - XFER
3032 * @link: link on which timings will be programmed
3033 * @r_failed_dev: out parameter for failed device
3035 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3036 * ata_set_mode() fails, pointer to the failing device is
3037 * returned in @r_failed_dev.
3040 * PCI/etc. bus probe sem.
3043 * 0 on success, negative errno otherwise
3045 int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
3047 struct ata_port *ap = link->ap;
3048 struct ata_device *dev;
3051 /* if data transfer is verified, clear DUBIOUS_XFER on ering top */
3052 ata_for_each_dev(dev, link, ENABLED) {
3053 if (!(dev->flags & ATA_DFLAG_DUBIOUS_XFER)) {
3054 struct ata_ering_entry *ent;
3056 ent = ata_ering_top(&dev->ering);
3058 ent->eflags &= ~ATA_EFLAG_DUBIOUS_XFER;
3062 /* has private set_mode? */
3063 if (ap->ops->set_mode)
3064 rc = ap->ops->set_mode(link, r_failed_dev);
3066 rc = ata_do_set_mode(link, r_failed_dev);
3068 /* if transfer mode has changed, set DUBIOUS_XFER on device */
3069 ata_for_each_dev(dev, link, ENABLED) {
3070 struct ata_eh_context *ehc = &link->eh_context;
3071 u8 saved_xfer_mode = ehc->saved_xfer_mode[dev->devno];
3072 u8 saved_ncq = !!(ehc->saved_ncq_enabled & (1 << dev->devno));
3074 if (dev->xfer_mode != saved_xfer_mode ||
3075 ata_ncq_enabled(dev) != saved_ncq)
3076 dev->flags |= ATA_DFLAG_DUBIOUS_XFER;
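/* DUBIOUS_XFER marks the first transfer after a mode change as
 * suspect. Once a transfer is verified, ATA_DFLAG_DUBIOUS_XFER is
 * cleared elsewhere and the loop at the top of this function strips
 * ATA_EFLAG_DUBIOUS_XFER from the newest ering entry.
 */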
3083 * atapi_eh_clear_ua - Clear ATAPI UNIT ATTENTION after reset
3084 * @dev: ATAPI device to clear UA for
3086 * Resets and other operations can make an ATAPI device raise
3087 * UNIT ATTENTION which causes the next operation to fail. This
3088 * function clears UA.
3091 * EH context (may sleep).
3094 * 0 on success, -errno on failure.
3096 static int atapi_eh_clear_ua(struct ata_device *dev)
3100 for (i = 0; i < ATA_EH_UA_TRIES; i++) {
3101 u8 *sense_buffer = dev->link->ap->sector_buf;
3103 unsigned int err_mask;
3105 err_mask = atapi_eh_tur(dev, &sense_key);
3106 if (err_mask != 0 && err_mask != AC_ERR_DEV) {
3107 ata_dev_printk(dev, KERN_WARNING, "TEST_UNIT_READY "
3108 "failed (err_mask=0x%x)\n", err_mask);
3112 if (!err_mask || sense_key != UNIT_ATTENTION)
3115 err_mask = atapi_eh_request_sense(dev, sense_buffer, sense_key);
3117 ata_dev_printk(dev, KERN_WARNING, "failed to clear "
3118 "UNIT ATTENTION (err_mask=0x%x)\n", err_mask);
3123 ata_dev_printk(dev, KERN_WARNING,
3124 "UNIT ATTENTION persists after %d tries\n", ATA_EH_UA_TRIES);
3130 * ata_eh_maybe_retry_flush - Retry FLUSH if necessary
3131 * @dev: ATA device which may need FLUSH retry
3133 * If @dev failed FLUSH, it needs to be reported to the upper layer
3134 * immediately as it means that @dev failed to remap and already
3135 * lost at least a sector, and further FLUSH retries won't make
3136 * any difference to the lost sector. However, if FLUSH failed
3137 * for other reasons, for example a transmission error, FLUSH needs to be retried.
3140 * This function determines whether FLUSH failure retry is
3141 * necessary and performs it if so.
3144 * 0 if EH can continue, -errno if EH needs to be repeated.
3146 static int ata_eh_maybe_retry_flush(struct ata_device *dev)
3148 struct ata_link *link = dev->link;
3149 struct ata_port *ap = link->ap;
3150 struct ata_queued_cmd *qc;
3151 struct ata_taskfile tf;
3152 unsigned int err_mask;
3155 /* did flush fail for this device? */
3156 if (!ata_tag_valid(link->active_tag))
3159 qc = __ata_qc_from_tag(ap, link->active_tag);
3160 if (qc->dev != dev || (qc->tf.command != ATA_CMD_FLUSH_EXT &&
3161 qc->tf.command != ATA_CMD_FLUSH))
3164 /* if the device failed it, it should be reported to upper layers */
3165 if (qc->err_mask & AC_ERR_DEV)
3168 /* flush failed for some other reason, give it another shot */
3169 ata_tf_init(dev, &tf);
3171 tf.command = qc->tf.command;
3172 tf.flags |= ATA_TFLAG_DEVICE;
3173 tf.protocol = ATA_PROT_NODATA;
3175 ata_dev_printk(dev, KERN_WARNING, "retrying FLUSH 0x%x Emask 0x%x\n",
3176 tf.command, qc->err_mask);
3178 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
3181 * FLUSH is complete but there's no way to
3182 * successfully complete a failed command from EH.
3183 * Making sure retry is allowed at least once and
3184 * retrying it should do the trick - whatever was in
3185 * the cache is already on the platter and this won't
3186 * cause infinite loop.
3188 qc->scsicmd->allowed = max(qc->scsicmd->allowed, 1);
3190 ata_dev_printk(dev, KERN_WARNING, "FLUSH failed Emask 0x%x\n",
3194 /* if device failed it, report it to upper layers */
3195 if (err_mask & AC_ERR_DEV) {
3196 qc->err_mask |= AC_ERR_DEV;
3198 if (!(ap->pflags & ATA_PFLAG_FROZEN))
3205 static int ata_link_nr_enabled(struct ata_link *link)
3207 struct ata_device *dev;
3210 ata_for_each_dev(dev, link, ENABLED)
3215 static int ata_link_nr_vacant(struct ata_link *link)
3217 struct ata_device *dev;
3220 ata_for_each_dev(dev, link, ALL)
3221 if (dev->class == ATA_DEV_UNKNOWN)
3226 static int ata_eh_skip_recovery(struct ata_link *link)
3228 struct ata_port *ap = link->ap;
3229 struct ata_eh_context *ehc = &link->eh_context;
3230 struct ata_device *dev;
3232 /* skip disabled links */
3233 if (link->flags & ATA_LFLAG_DISABLED)
3236 /* thaw frozen port and recover failed devices */
3237 if ((ap->pflags & ATA_PFLAG_FROZEN) || ata_link_nr_enabled(link))
3240 /* reset at least once if reset is requested */
3241 if ((ehc->i.action & ATA_EH_RESET) &&
3242 !(ehc->i.flags & ATA_EHI_DID_RESET))
3245 /* skip if class codes for all vacant slots are ATA_DEV_NONE */
3246 ata_for_each_dev(dev, link, ALL) {
3247 if (dev->class == ATA_DEV_UNKNOWN &&
3248 ehc->classes[dev->devno] != ATA_DEV_NONE)
3255 static int ata_count_probe_trials_cb(struct ata_ering_entry *ent, void *void_arg)
3257 u64 interval = msecs_to_jiffies(ATA_EH_PROBE_TRIAL_INTERVAL);
3258 u64 now = get_jiffies_64();
3259 int *trials = void_arg;
3261 if (ent->timestamp < now - min(now, interval))
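/* Entries older than the trial interval end the walk (the elided
 * branch presumably returns a negative value, which stops
 * ata_ering_map()); each newer entry bumps *trials.
 */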
3268 static int ata_eh_schedule_probe(struct ata_device *dev)
3270 struct ata_eh_context *ehc = &dev->link->eh_context;
3271 struct ata_link *link = ata_dev_phys_link(dev);
3274 if (!(ehc->i.probe_mask & (1 << dev->devno)) ||
3275 (ehc->did_probe_mask & (1 << dev->devno)))
3278 ata_eh_detach_dev(dev);
3280 ehc->did_probe_mask |= (1 << dev->devno);
3281 ehc->i.action |= ATA_EH_RESET;
3282 ehc->saved_xfer_mode[dev->devno] = 0;
3283 ehc->saved_ncq_enabled &= ~(1 << dev->devno);
3285 /* Record and count probe trials on the ering. The specific
3286 * error mask used is irrelevant. Because a successful device
3287 * detection clears the ering, this count accumulates only if
3288 * there are consecutive failed probes.
3290 * If the count is equal to or higher than ATA_EH_PROBE_TRIALS
3291 * in the last ATA_EH_PROBE_TRIAL_INTERVAL, link speed is
3292 * forced to 1.5Gbps.
3294 * This is to work around cases where failed link speed
3295 * negotiation results in device misdetection leading to
3296 * infinite DEVXCHG or PHRDY CHG events.
3298 ata_ering_record(&dev->ering, 0, AC_ERR_OTHER);
3299 ata_ering_map(&dev->ering, ata_count_probe_trials_cb, &trials);
3301 if (trials > ATA_EH_PROBE_TRIALS)
3302 sata_down_spd_limit(link, 1);
3307 static int ata_eh_handle_dev_fail(struct ata_device *dev, int err)
3309 struct ata_eh_context *ehc = &dev->link->eh_context;
3311 /* -EAGAIN from EH routine indicates retry without prejudice.
3312 * The requester is responsible for ensuring forward progress.
3315 ehc->tries[dev->devno]--;
3319 /* device missing or wrong IDENTIFY data, schedule probing */
3320 ehc->i.probe_mask |= (1 << dev->devno);
3322 /* give it just one more chance */
3323 ehc->tries[dev->devno] = min(ehc->tries[dev->devno], 1);
3325 if (ehc->tries[dev->devno] == 1) {
3326 /* This is the last chance, better to slow
3327 * down than lose it.
3329 sata_down_spd_limit(ata_dev_phys_link(dev), 0);
3330 if (dev->pio_mode > XFER_PIO_0)
3331 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
3335 if (ata_dev_enabled(dev) && !ehc->tries[dev->devno]) {
3336 /* disable device if it has used up all its chances */
3337 ata_dev_disable(dev);
3339 /* detach if offline */
3340 if (ata_phys_link_offline(ata_dev_phys_link(dev)))
3341 ata_eh_detach_dev(dev);
3343 /* schedule probe if necessary */
3344 if (ata_eh_schedule_probe(dev)) {
3345 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3346 memset(ehc->cmd_timeout_idx[dev->devno], 0,
3347 sizeof(ehc->cmd_timeout_idx[dev->devno]));
3352 ehc->i.action |= ATA_EH_RESET;
3358 * ata_eh_recover - recover host port after error
3359 * @ap: host port to recover
3360 * @prereset: prereset method (can be NULL)
3361 * @softreset: softreset method (can be NULL)
3362 * @hardreset: hardreset method (can be NULL)
3363 * @postreset: postreset method (can be NULL)
3364 * @r_failed_link: out parameter for failed link
3366 * This is the alpha and omega, eum and yang, heart and soul of
3367 * libata exception handling. On entry, actions required to
3368 * recover each link and hotplug requests are recorded in the
3369 * link's eh_context. This function executes all the operations
3370 * with appropriate retries and fallbacks to resurrect failed
3371 * devices, detach goners and greet newcomers.
3374 * Kernel thread context (may sleep).
3377 * 0 on success, -errno on failure.
3379 int ata_eh_recover(struct ata_port *ap, ata_prereset_fn_t prereset,
3380 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3381 ata_postreset_fn_t postreset,
3382 struct ata_link **r_failed_link)
3384 struct ata_link *link;
3385 struct ata_device *dev;
3388 unsigned long flags, deadline;
3392 /* prep for recovery */
3393 ata_for_each_link(link, ap, EDGE) {
3394 struct ata_eh_context *ehc = &link->eh_context;
3396 /* re-enable link? */
3397 if (ehc->i.action & ATA_EH_ENABLE_LINK) {
3398 ata_eh_about_to_do(link, NULL, ATA_EH_ENABLE_LINK);
3399 spin_lock_irqsave(ap->lock, flags);
3400 link->flags &= ~ATA_LFLAG_DISABLED;
3401 spin_unlock_irqrestore(ap->lock, flags);
3402 ata_eh_done(link, NULL, ATA_EH_ENABLE_LINK);
3405 ata_for_each_dev(dev, link, ALL) {
3406 if (link->flags & ATA_LFLAG_NO_RETRY)
3407 ehc->tries[dev->devno] = 1;
3409 ehc->tries[dev->devno] = ATA_EH_DEV_TRIES;
3411 /* collect port action mask recorded in dev actions */
3412 ehc->i.action |= ehc->i.dev_action[dev->devno] &
3413 ~ATA_EH_PERDEV_MASK;
3414 ehc->i.dev_action[dev->devno] &= ATA_EH_PERDEV_MASK;
3416 /* process hotplug request */
3417 if (dev->flags & ATA_DFLAG_DETACH)
3418 ata_eh_detach_dev(dev);
3420 /* schedule probe if necessary */
3421 if (!ata_dev_enabled(dev))
3422 ata_eh_schedule_probe(dev);
3430 /* if UNLOADING, finish immediately */
3431 if (ap->pflags & ATA_PFLAG_UNLOADING)
3435 ata_for_each_link(link, ap, EDGE) {
3436 struct ata_eh_context *ehc = &link->eh_context;
3438 /* skip EH if possible. */
3439 if (ata_eh_skip_recovery(link))
3442 ata_for_each_dev(dev, link, ALL)
3443 ehc->classes[dev->devno] = ATA_DEV_UNKNOWN;
3447 ata_for_each_link(link, ap, EDGE) {
3448 struct ata_eh_context *ehc = &link->eh_context;
3450 if (!(ehc->i.action & ATA_EH_RESET))
3453 rc = ata_eh_reset(link, ata_link_nr_vacant(link),
3454 prereset, softreset, hardreset, postreset);
3456 ata_link_printk(link, KERN_ERR,
3457 "reset failed, giving up\n");
3466 * clears ATA_EH_PARK in eh_info and resets
3467 * ap->park_req_pending
3469 ata_eh_pull_park_action(ap);
3472 ata_for_each_link(link, ap, EDGE) {
3473 ata_for_each_dev(dev, link, ALL) {
3474 struct ata_eh_context *ehc = &link->eh_context;
3477 if (dev->class != ATA_DEV_ATA)
3479 if (!(ehc->i.dev_action[dev->devno] &
3482 tmp = dev->unpark_deadline;
3483 if (time_before(deadline, tmp))
3485 else if (time_before_eq(tmp, jiffies))
3487 if (ehc->unloaded_mask & (1 << dev->devno))
3490 ata_eh_park_issue_cmd(dev, 1);
3495 if (time_before_eq(deadline, now))
3498 deadline = wait_for_completion_timeout(&ap->park_req_pending,
3501 ata_for_each_link(link, ap, EDGE) {
3502 ata_for_each_dev(dev, link, ALL) {
3503 if (!(link->eh_context.unloaded_mask &
3507 ata_eh_park_issue_cmd(dev, 0);
3508 ata_eh_done(link, dev, ATA_EH_PARK);
3513 ata_for_each_link(link, ap, EDGE) {
3514 struct ata_eh_context *ehc = &link->eh_context;
3516 /* revalidate existing devices and attach new ones */
3517 rc = ata_eh_revalidate_and_attach(link, &dev);
3521 /* if PMP got attached, return, pmp EH will take care of it */
3522 if (link->device->class == ATA_DEV_PMP) {
3527 /* configure transfer mode if necessary */
3528 if (ehc->i.flags & ATA_EHI_SETMODE) {
3529 rc = ata_set_mode(link, &dev);
3532 ehc->i.flags &= ~ATA_EHI_SETMODE;
3535 /* If reset has been issued, clear UA to avoid
3536 * disrupting the current users of the device.
3538 if (ehc->i.flags & ATA_EHI_DID_RESET) {
3539 ata_for_each_dev(dev, link, ALL) {
3540 if (dev->class != ATA_DEV_ATAPI)
3542 rc = atapi_eh_clear_ua(dev);
3548 /* retry flush if necessary */
3549 ata_for_each_dev(dev, link, ALL) {
3550 if (dev->class != ATA_DEV_ATA)
3552 rc = ata_eh_maybe_retry_flush(dev);
3557 /* configure link power saving */
3558 if (ehc->i.action & ATA_EH_LPM)
3559 ata_for_each_dev(dev, link, ALL)
3560 ata_dev_enable_pm(dev, ap->pm_policy);
3562 /* this link is okay now */
3568 ata_eh_handle_dev_fail(dev, rc);
3570 if (ap->pflags & ATA_PFLAG_FROZEN) {
3571 /* PMP reset requires working host port.
3572 * Can't retry if it's frozen.
3574 if (sata_pmp_attached(ap))
3584 if (rc && r_failed_link)
3585 *r_failed_link = link;
3587 DPRINTK("EXIT, rc=%d\n", rc);
3592 * ata_eh_finish - finish up EH
3593 * @ap: host port to finish EH for
3595 * Recovery is complete. Clean up EH states and retry or finish failed qcs.
3601 void ata_eh_finish(struct ata_port *ap)
3605 /* retry or finish qcs */
3606 for (tag = 0; tag < ATA_MAX_QUEUE; tag++) {
3607 struct ata_queued_cmd *qc = __ata_qc_from_tag(ap, tag);
3609 if (!(qc->flags & ATA_QCFLAG_FAILED))
3613 /* FIXME: Once EH migration is complete,
3614 * generate sense data in this function,
3615 * considering both err_mask and tf.
3617 if (qc->flags & ATA_QCFLAG_RETRY)
3618 ata_eh_qc_retry(qc);
3620 ata_eh_qc_complete(qc);
3622 if (qc->flags & ATA_QCFLAG_SENSE_VALID) {
3623 ata_eh_qc_complete(qc);
3625 /* feed zero TF to sense generation */
3626 memset(&qc->result_tf, 0, sizeof(qc->result_tf));
3627 ata_eh_qc_retry(qc);
3632 /* make sure nr_active_links is zero after EH */
3633 WARN_ON(ap->nr_active_links);
3634 ap->nr_active_links = 0;
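/* EH owns every qc while it runs, so a non-zero active-link count
 * here means a command was lost; warn and repair the counter rather
 * than leave the port looking permanently busy.
 */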
3638 * ata_do_eh - do standard error handling
3639 * @ap: host port to handle error for
3641 * @prereset: prereset method (can be NULL)
3642 * @softreset: softreset method (can be NULL)
3643 * @hardreset: hardreset method (can be NULL)
3644 * @postreset: postreset method (can be NULL)
3646 * Perform standard error handling sequence.
3649 * Kernel thread context (may sleep).
3651 void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
3652 ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
3653 ata_postreset_fn_t postreset)
3655 struct ata_device *dev;
3661 rc = ata_eh_recover(ap, prereset, softreset, hardreset, postreset,
3664 ata_for_each_dev(dev, &ap->link, ALL)
3665 ata_dev_disable(dev);
3672 * ata_std_error_handler - standard error handler
3673 * @ap: host port to handle error for
3675 * Standard error handler
3678 * Kernel thread context (may sleep).
3680 void ata_std_error_handler(struct ata_port *ap)
3682 struct ata_port_operations *ops = ap->ops;
3683 ata_reset_fn_t hardreset = ops->hardreset;
3685 /* ignore built-in hardreset if SCR access is not available */
3686 if (ata_is_builtin_hardreset(hardreset) && !sata_scr_valid(&ap->link))
3689 ata_do_eh(ap, ops->prereset, ops->softreset, hardreset, ops->postreset);
3694 * ata_eh_handle_port_suspend - perform port suspend operation
3695 * @ap: port to suspend
3700 * Kernel thread context (may sleep).
3702 static void ata_eh_handle_port_suspend(struct ata_port *ap)
3704 unsigned long flags;
3707 /* are we suspending? */
3708 spin_lock_irqsave(ap->lock, flags);
3709 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3710 ap->pm_mesg.event == PM_EVENT_ON) {
3711 spin_unlock_irqrestore(ap->lock, flags);
3714 spin_unlock_irqrestore(ap->lock, flags);
3716 WARN_ON(ap->pflags & ATA_PFLAG_SUSPENDED);
3718 /* tell ACPI we're suspending */
3719 rc = ata_acpi_on_suspend(ap);
3724 ata_eh_freeze_port(ap);
3726 if (ap->ops->port_suspend)
3727 rc = ap->ops->port_suspend(ap, ap->pm_mesg);
3729 ata_acpi_set_state(ap, PMSG_SUSPEND);
3732 spin_lock_irqsave(ap->lock, flags);
3734 ap->pflags &= ~ATA_PFLAG_PM_PENDING;
3736 ap->pflags |= ATA_PFLAG_SUSPENDED;
3737 else if (ap->pflags & ATA_PFLAG_FROZEN)
3738 ata_port_schedule_eh(ap);
3740 if (ap->pm_result) {
3741 *ap->pm_result = rc;
3742 ap->pm_result = NULL;
3745 spin_unlock_irqrestore(ap->lock, flags);
3751 * ata_eh_handle_port_resume - perform port resume operation
3752 * @ap: port to resume
3757 * Kernel thread context (may sleep).
3759 static void ata_eh_handle_port_resume(struct ata_port *ap)
3761 struct ata_link *link;
3762 struct ata_device *dev;
3763 unsigned long flags;
3766 /* are we resuming? */
3767 spin_lock_irqsave(ap->lock, flags);
3768 if (!(ap->pflags & ATA_PFLAG_PM_PENDING) ||
3769 ap->pm_mesg.event != PM_EVENT_ON) {
3770 spin_unlock_irqrestore(ap->lock, flags);
3773 spin_unlock_irqrestore(ap->lock, flags);
3775 WARN_ON(!(ap->pflags & ATA_PFLAG_SUSPENDED));
3778 * Error timestamps are in jiffies, which doesn't advance while
3779 * suspended, and PHY events during resume aren't too uncommon.
3780 * When the two are combined, it can lead to unnecessary speed
3781 * downs if the machine is suspended and resumed repeatedly.
3782 * Clear error history.
3784 ata_for_each_link(link, ap, HOST_FIRST)
3785 ata_for_each_dev(dev, link, ALL)
3786 ata_ering_clear(&dev->ering);
3788 ata_acpi_set_state(ap, PMSG_ON);
3790 if (ap->ops->port_resume)
3791 rc = ap->ops->port_resume(ap);
3793 /* tell ACPI that we're resuming */
3794 ata_acpi_on_resume(ap);
3797 spin_lock_irqsave(ap->lock, flags);
3798 ap->pflags &= ~(ATA_PFLAG_PM_PENDING | ATA_PFLAG_SUSPENDED);
3799 if (ap->pm_result) {
3800 *ap->pm_result = rc;
3801 ap->pm_result = NULL;
3803 spin_unlock_irqrestore(ap->lock, flags);
3805 #endif /* CONFIG_PM */