]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
[PATCH] libata: cosmetic changes to sense generation functions
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
d7bb4cc7 62/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
63const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
64const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
65const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 66
3373efd8
TH
67static unsigned int ata_dev_init_params(struct ata_device *dev,
68 u16 heads, u16 sectors);
69static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
70static void ata_dev_xfermask(struct ata_device *dev);
1da177e4
LT
71
72static unsigned int ata_unique_id = 1;
73static struct workqueue_struct *ata_wq;
74
453b07ac
TH
75struct workqueue_struct *ata_aux_wq;
76
418dc1f5 77int atapi_enabled = 1;
1623c81e
JG
78module_param(atapi_enabled, int, 0444);
79MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
80
95de719a
AL
81int atapi_dmadir = 0;
82module_param(atapi_dmadir, int, 0444);
83MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
84
c3c013a2
JG
85int libata_fua = 0;
86module_param_named(fua, libata_fua, int, 0444);
87MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
88
a8601e5f
AM
89static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
90module_param(ata_probe_timeout, int, 0444);
91MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
92
1da177e4
LT
93MODULE_AUTHOR("Jeff Garzik");
94MODULE_DESCRIPTION("Library module for ATA devices");
95MODULE_LICENSE("GPL");
96MODULE_VERSION(DRV_VERSION);
97
0baab86b 98
1da177e4
LT
99/**
100 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
101 * @tf: Taskfile to convert
102 * @fis: Buffer into which data will output
103 * @pmp: Port multiplier port
104 *
105 * Converts a standard ATA taskfile to a Serial ATA
106 * FIS structure (Register - Host to Device).
107 *
108 * LOCKING:
109 * Inherited from caller.
110 */
111
057ace5e 112void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
113{
114 fis[0] = 0x27; /* Register - Host to Device FIS */
115 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
116 bit 7 indicates Command FIS */
117 fis[2] = tf->command;
118 fis[3] = tf->feature;
119
120 fis[4] = tf->lbal;
121 fis[5] = tf->lbam;
122 fis[6] = tf->lbah;
123 fis[7] = tf->device;
124
125 fis[8] = tf->hob_lbal;
126 fis[9] = tf->hob_lbam;
127 fis[10] = tf->hob_lbah;
128 fis[11] = tf->hob_feature;
129
130 fis[12] = tf->nsect;
131 fis[13] = tf->hob_nsect;
132 fis[14] = 0;
133 fis[15] = tf->ctl;
134
135 fis[16] = 0;
136 fis[17] = 0;
137 fis[18] = 0;
138 fis[19] = 0;
139}
140
141/**
142 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
143 * @fis: Buffer from which data will be input
144 * @tf: Taskfile to output
145 *
e12a1be6 146 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
147 *
148 * LOCKING:
149 * Inherited from caller.
150 */
151
057ace5e 152void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
153{
154 tf->command = fis[2]; /* status */
155 tf->feature = fis[3]; /* error */
156
157 tf->lbal = fis[4];
158 tf->lbam = fis[5];
159 tf->lbah = fis[6];
160 tf->device = fis[7];
161
162 tf->hob_lbal = fis[8];
163 tf->hob_lbam = fis[9];
164 tf->hob_lbah = fis[10];
165
166 tf->nsect = fis[12];
167 tf->hob_nsect = fis[13];
168}
169
8cbd6df1
AL
170static const u8 ata_rw_cmds[] = {
171 /* pio multi */
172 ATA_CMD_READ_MULTI,
173 ATA_CMD_WRITE_MULTI,
174 ATA_CMD_READ_MULTI_EXT,
175 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
176 0,
177 0,
178 0,
179 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
180 /* pio */
181 ATA_CMD_PIO_READ,
182 ATA_CMD_PIO_WRITE,
183 ATA_CMD_PIO_READ_EXT,
184 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
185 0,
186 0,
187 0,
188 0,
8cbd6df1
AL
189 /* dma */
190 ATA_CMD_READ,
191 ATA_CMD_WRITE,
192 ATA_CMD_READ_EXT,
9a3dccc4
TH
193 ATA_CMD_WRITE_EXT,
194 0,
195 0,
196 0,
197 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 198};
1da177e4
LT
199
200/**
8cbd6df1
AL
201 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
202 * @qc: command to examine and configure
1da177e4 203 *
2e9edbf8 204 * Examine the device configuration and tf->flags to calculate
8cbd6df1 205 * the proper read/write commands and protocol to use.
1da177e4
LT
206 *
207 * LOCKING:
208 * caller.
209 */
9a3dccc4 210int ata_rwcmd_protocol(struct ata_queued_cmd *qc)
1da177e4 211{
8cbd6df1
AL
212 struct ata_taskfile *tf = &qc->tf;
213 struct ata_device *dev = qc->dev;
9a3dccc4 214 u8 cmd;
1da177e4 215
9a3dccc4 216 int index, fua, lba48, write;
2e9edbf8 217
9a3dccc4 218 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
219 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
220 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 221
8cbd6df1
AL
222 if (dev->flags & ATA_DFLAG_PIO) {
223 tf->protocol = ATA_PROT_PIO;
9a3dccc4 224 index = dev->multi_count ? 0 : 8;
8d238e01
AC
225 } else if (lba48 && (qc->ap->flags & ATA_FLAG_PIO_LBA48)) {
226 /* Unable to use DMA due to host limitation */
227 tf->protocol = ATA_PROT_PIO;
0565c26d 228 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
229 } else {
230 tf->protocol = ATA_PROT_DMA;
9a3dccc4 231 index = 16;
8cbd6df1 232 }
1da177e4 233
9a3dccc4
TH
234 cmd = ata_rw_cmds[index + fua + lba48 + write];
235 if (cmd) {
236 tf->command = cmd;
237 return 0;
238 }
239 return -1;
1da177e4
LT
240}
241
cb95d562
TH
242/**
243 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
244 * @pio_mask: pio_mask
245 * @mwdma_mask: mwdma_mask
246 * @udma_mask: udma_mask
247 *
248 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
249 * unsigned int xfer_mask.
250 *
251 * LOCKING:
252 * None.
253 *
254 * RETURNS:
255 * Packed xfer_mask.
256 */
257static unsigned int ata_pack_xfermask(unsigned int pio_mask,
258 unsigned int mwdma_mask,
259 unsigned int udma_mask)
260{
261 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
262 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
263 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
264}
265
c0489e4e
TH
266/**
267 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
268 * @xfer_mask: xfer_mask to unpack
269 * @pio_mask: resulting pio_mask
270 * @mwdma_mask: resulting mwdma_mask
271 * @udma_mask: resulting udma_mask
272 *
273 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
274 * Any NULL distination masks will be ignored.
275 */
276static void ata_unpack_xfermask(unsigned int xfer_mask,
277 unsigned int *pio_mask,
278 unsigned int *mwdma_mask,
279 unsigned int *udma_mask)
280{
281 if (pio_mask)
282 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
283 if (mwdma_mask)
284 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
285 if (udma_mask)
286 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
287}
288
cb95d562 289static const struct ata_xfer_ent {
be9a50c8 290 int shift, bits;
cb95d562
TH
291 u8 base;
292} ata_xfer_tbl[] = {
293 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
294 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
295 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
296 { -1, },
297};
298
299/**
300 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
301 * @xfer_mask: xfer_mask of interest
302 *
303 * Return matching XFER_* value for @xfer_mask. Only the highest
304 * bit of @xfer_mask is considered.
305 *
306 * LOCKING:
307 * None.
308 *
309 * RETURNS:
310 * Matching XFER_* value, 0 if no match found.
311 */
312static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
313{
314 int highbit = fls(xfer_mask) - 1;
315 const struct ata_xfer_ent *ent;
316
317 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
318 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
319 return ent->base + highbit - ent->shift;
320 return 0;
321}
322
323/**
324 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
325 * @xfer_mode: XFER_* of interest
326 *
327 * Return matching xfer_mask for @xfer_mode.
328 *
329 * LOCKING:
330 * None.
331 *
332 * RETURNS:
333 * Matching xfer_mask, 0 if no match found.
334 */
335static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
336{
337 const struct ata_xfer_ent *ent;
338
339 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
340 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
341 return 1 << (ent->shift + xfer_mode - ent->base);
342 return 0;
343}
344
345/**
346 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
347 * @xfer_mode: XFER_* of interest
348 *
349 * Return matching xfer_shift for @xfer_mode.
350 *
351 * LOCKING:
352 * None.
353 *
354 * RETURNS:
355 * Matching xfer_shift, -1 if no match found.
356 */
357static int ata_xfer_mode2shift(unsigned int xfer_mode)
358{
359 const struct ata_xfer_ent *ent;
360
361 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
362 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
363 return ent->shift;
364 return -1;
365}
366
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* table index equals the bit position in xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
412
4c360c81
TH
/* Translate a SATA link speed number (1-based, from SStatus SPD) into a
 * human-readable string; returns "<unknown>" for out-of-range values.
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;	/* spd is 1-based; 0 is invalid */

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[idx];
}
424
3373efd8 425void ata_dev_disable(struct ata_device *dev)
0b8efb0a 426{
0dd4b21f 427 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
f15a1daf 428 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
0b8efb0a
TH
429 dev->class++;
430 }
431}
432
1da177e4
LT
433/**
434 * ata_pio_devchk - PATA device presence detection
435 * @ap: ATA channel to examine
436 * @device: Device to examine (starting at zero)
437 *
438 * This technique was originally described in
439 * Hale Landis's ATADRVR (www.ata-atapi.com), and
440 * later found its way into the ATA/ATAPI spec.
441 *
442 * Write a pattern to the ATA shadow registers,
443 * and if a device is present, it will respond by
444 * correctly storing and echoing back the
445 * ATA shadow register contents.
446 *
447 * LOCKING:
448 * caller.
449 */
450
451static unsigned int ata_pio_devchk(struct ata_port *ap,
452 unsigned int device)
453{
454 struct ata_ioports *ioaddr = &ap->ioaddr;
455 u8 nsect, lbal;
456
457 ap->ops->dev_select(ap, device);
458
459 outb(0x55, ioaddr->nsect_addr);
460 outb(0xaa, ioaddr->lbal_addr);
461
462 outb(0xaa, ioaddr->nsect_addr);
463 outb(0x55, ioaddr->lbal_addr);
464
465 outb(0x55, ioaddr->nsect_addr);
466 outb(0xaa, ioaddr->lbal_addr);
467
468 nsect = inb(ioaddr->nsect_addr);
469 lbal = inb(ioaddr->lbal_addr);
470
471 if ((nsect == 0x55) && (lbal == 0xaa))
472 return 1; /* we found a device */
473
474 return 0; /* nothing found */
475}
476
477/**
478 * ata_mmio_devchk - PATA device presence detection
479 * @ap: ATA channel to examine
480 * @device: Device to examine (starting at zero)
481 *
482 * This technique was originally described in
483 * Hale Landis's ATADRVR (www.ata-atapi.com), and
484 * later found its way into the ATA/ATAPI spec.
485 *
486 * Write a pattern to the ATA shadow registers,
487 * and if a device is present, it will respond by
488 * correctly storing and echoing back the
489 * ATA shadow register contents.
490 *
491 * LOCKING:
492 * caller.
493 */
494
495static unsigned int ata_mmio_devchk(struct ata_port *ap,
496 unsigned int device)
497{
498 struct ata_ioports *ioaddr = &ap->ioaddr;
499 u8 nsect, lbal;
500
501 ap->ops->dev_select(ap, device);
502
503 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
504 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
505
506 writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
507 writeb(0x55, (void __iomem *) ioaddr->lbal_addr);
508
509 writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
510 writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);
511
512 nsect = readb((void __iomem *) ioaddr->nsect_addr);
513 lbal = readb((void __iomem *) ioaddr->lbal_addr);
514
515 if ((nsect == 0x55) && (lbal == 0xaa))
516 return 1; /* we found a device */
517
518 return 0; /* nothing found */
519}
520
521/**
522 * ata_devchk - PATA device presence detection
523 * @ap: ATA channel to examine
524 * @device: Device to examine (starting at zero)
525 *
526 * Dispatch ATA device presence detection, depending
527 * on whether we are using PIO or MMIO to talk to the
528 * ATA shadow registers.
529 *
530 * LOCKING:
531 * caller.
532 */
533
534static unsigned int ata_devchk(struct ata_port *ap,
535 unsigned int device)
536{
537 if (ap->flags & ATA_FLAG_MMIO)
538 return ata_mmio_devchk(ap, device);
539 return ata_pio_devchk(ap, device);
540}
541
542/**
543 * ata_dev_classify - determine device type based on ATA-spec signature
544 * @tf: ATA taskfile register set for device to be identified
545 *
546 * Determine from taskfile register contents whether a device is
547 * ATA or ATAPI, as per "Signature and persistence" section
548 * of ATA/PI spec (volume 1, sect 5.14).
549 *
550 * LOCKING:
551 * None.
552 *
553 * RETURNS:
554 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
555 * the event of failure.
556 */
557
057ace5e 558unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
559{
560 /* Apple's open source Darwin code hints that some devices only
561 * put a proper signature into the LBA mid/high registers,
562 * So, we only check those. It's sufficient for uniqueness.
563 */
564
565 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
566 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
567 DPRINTK("found ATA device by sig\n");
568 return ATA_DEV_ATA;
569 }
570
571 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
572 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
573 DPRINTK("found ATAPI device by sig\n");
574 return ATA_DEV_ATAPI;
575 }
576
577 DPRINTK("unknown device\n");
578 return ATA_DEV_UNKNOWN;
579}
580
581/**
582 * ata_dev_try_classify - Parse returned ATA device signature
583 * @ap: ATA channel to examine
584 * @device: Device to examine (starting at zero)
b4dc7623 585 * @r_err: Value of error register on completion
1da177e4
LT
586 *
587 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
588 * an ATA/ATAPI-defined set of values is placed in the ATA
589 * shadow registers, indicating the results of device detection
590 * and diagnostics.
591 *
592 * Select the ATA device, and read the values from the ATA shadow
593 * registers. Then parse according to the Error register value,
594 * and the spec-defined values examined by ata_dev_classify().
595 *
596 * LOCKING:
597 * caller.
b4dc7623
TH
598 *
599 * RETURNS:
600 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4
LT
601 */
602
b4dc7623
TH
603static unsigned int
604ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
1da177e4 605{
1da177e4
LT
606 struct ata_taskfile tf;
607 unsigned int class;
608 u8 err;
609
610 ap->ops->dev_select(ap, device);
611
612 memset(&tf, 0, sizeof(tf));
613
1da177e4 614 ap->ops->tf_read(ap, &tf);
0169e284 615 err = tf.feature;
b4dc7623
TH
616 if (r_err)
617 *r_err = err;
1da177e4 618
93590859
AC
619 /* see if device passed diags: if master then continue and warn later */
620 if (err == 0 && device == 0)
621 /* diagnostic fail : do nothing _YET_ */
622 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
623 else if (err == 1)
1da177e4
LT
624 /* do nothing */ ;
625 else if ((device == 0) && (err == 0x81))
626 /* do nothing */ ;
627 else
b4dc7623 628 return ATA_DEV_NONE;
1da177e4 629
b4dc7623 630 /* determine if device is ATA or ATAPI */
1da177e4 631 class = ata_dev_classify(&tf);
b4dc7623 632
1da177e4 633 if (class == ATA_DEV_UNKNOWN)
b4dc7623 634 return ATA_DEV_NONE;
1da177e4 635 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
b4dc7623
TH
636 return ATA_DEV_NONE;
637 return class;
1da177e4
LT
638}
639
640/**
6a62a04d 641 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
642 * @id: IDENTIFY DEVICE results we will examine
643 * @s: string into which data is output
644 * @ofs: offset into identify device page
645 * @len: length of string to return. must be an even number.
646 *
647 * The strings in the IDENTIFY DEVICE page are broken up into
648 * 16-bit chunks. Run through the string, and output each
649 * 8-bit chunk linearly, regardless of platform.
650 *
651 * LOCKING:
652 * caller.
653 */
654
6a62a04d
TH
655void ata_id_string(const u16 *id, unsigned char *s,
656 unsigned int ofs, unsigned int len)
1da177e4
LT
657{
658 unsigned int c;
659
660 while (len > 0) {
661 c = id[ofs] >> 8;
662 *s = c;
663 s++;
664
665 c = id[ofs] & 0xff;
666 *s = c;
667 s++;
668
669 ofs++;
670 len -= 2;
671 }
672}
673
0e949ff3 674/**
6a62a04d 675 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
676 * @id: IDENTIFY DEVICE results we will examine
677 * @s: string into which data is output
678 * @ofs: offset into identify device page
679 * @len: length of string to return. must be an odd number.
680 *
6a62a04d 681 * This function is identical to ata_id_string except that it
0e949ff3
TH
682 * trims trailing spaces and terminates the resulting string with
683 * null. @len must be actual maximum length (even number) + 1.
684 *
685 * LOCKING:
686 * caller.
687 */
6a62a04d
TH
688void ata_id_c_string(const u16 *id, unsigned char *s,
689 unsigned int ofs, unsigned int len)
0e949ff3
TH
690{
691 unsigned char *p;
692
693 WARN_ON(!(len & 1));
694
6a62a04d 695 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
696
697 p = s + strnlen(s, len - 1);
698 while (p > s && p[-1] == ' ')
699 p--;
700 *p = '\0';
701}
0baab86b 702
2940740b
TH
703static u64 ata_id_n_sectors(const u16 *id)
704{
705 if (ata_id_has_lba(id)) {
706 if (ata_id_has_lba48(id))
707 return ata_id_u64(id, 100);
708 else
709 return ata_id_u32(id, 60);
710 } else {
711 if (ata_id_current_chs_valid(id))
712 return ata_id_u32(id, 57);
713 else
714 return id[1] * id[3] * id[6];
715 }
716}
717
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty - controllers that don't need device
	 * selection use this as their dev_select() hook */
}
733
0baab86b 734
1da177e4
LT
735/**
736 * ata_std_dev_select - Select device 0/1 on ATA bus
737 * @ap: ATA channel to manipulate
738 * @device: ATA device (numbered from zero) to select
739 *
740 * Use the method defined in the ATA specification to
741 * make either device 0, or device 1, active on the
0baab86b
EF
742 * ATA channel. Works with both PIO and MMIO.
743 *
744 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
745 *
746 * LOCKING:
747 * caller.
748 */
749
750void ata_std_dev_select (struct ata_port *ap, unsigned int device)
751{
752 u8 tmp;
753
754 if (device == 0)
755 tmp = ATA_DEVICE_OBS;
756 else
757 tmp = ATA_DEVICE_OBS | ATA_DEV1;
758
759 if (ap->flags & ATA_FLAG_MMIO) {
760 writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
761 } else {
762 outb(tmp, ap->ioaddr.device_addr);
763 }
764 ata_pause(ap); /* needed; also flushes, for mmio */
765}
766
767/**
768 * ata_dev_select - Select device 0/1 on ATA bus
769 * @ap: ATA channel to manipulate
770 * @device: ATA device (numbered from zero) to select
771 * @wait: non-zero to wait for Status register BSY bit to clear
772 * @can_sleep: non-zero if context allows sleeping
773 *
774 * Use the method defined in the ATA specification to
775 * make either device 0, or device 1, active on the
776 * ATA channel.
777 *
778 * This is a high-level version of ata_std_dev_select(),
779 * which additionally provides the services of inserting
780 * the proper pauses and status polling, where needed.
781 *
782 * LOCKING:
783 * caller.
784 */
785
786void ata_dev_select(struct ata_port *ap, unsigned int device,
787 unsigned int wait, unsigned int can_sleep)
788{
88574551 789 if (ata_msg_probe(ap))
0dd4b21f 790 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
88574551 791 "device %u, wait %u\n", ap->id, device, wait);
1da177e4
LT
792
793 if (wait)
794 ata_wait_idle(ap);
795
796 ap->ops->dev_select(ap, device);
797
798 if (wait) {
799 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
800 msleep(150);
801 ata_wait_idle(ap);
802 }
803}
804
805/**
806 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 807 * @id: IDENTIFY DEVICE page to dump
1da177e4 808 *
0bd3300a
TH
809 * Dump selected 16-bit words from the given IDENTIFY DEVICE
810 * page.
1da177e4
LT
811 *
812 * LOCKING:
813 * caller.
814 */
815
0bd3300a 816static inline void ata_dump_id(const u16 *id)
1da177e4
LT
817{
818 DPRINTK("49==0x%04x "
819 "53==0x%04x "
820 "63==0x%04x "
821 "64==0x%04x "
822 "75==0x%04x \n",
0bd3300a
TH
823 id[49],
824 id[53],
825 id[63],
826 id[64],
827 id[75]);
1da177e4
LT
828 DPRINTK("80==0x%04x "
829 "81==0x%04x "
830 "82==0x%04x "
831 "83==0x%04x "
832 "84==0x%04x \n",
0bd3300a
TH
833 id[80],
834 id[81],
835 id[82],
836 id[83],
837 id[84]);
1da177e4
LT
838 DPRINTK("88==0x%04x "
839 "93==0x%04x\n",
0bd3300a
TH
840 id[88],
841 id[93]);
1da177e4
LT
842}
843
cb95d562
TH
844/**
845 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
846 * @id: IDENTIFY data to compute xfer mask from
847 *
848 * Compute the xfermask for this device. This is not as trivial
849 * as it seems if we must consider early devices correctly.
850 *
851 * FIXME: pre IDE drive timing (do we care ?).
852 *
853 * LOCKING:
854 * None.
855 *
856 * RETURNS:
857 * Computed xfermask
858 */
859static unsigned int ata_id_xfermask(const u16 *id)
860{
861 unsigned int pio_mask, mwdma_mask, udma_mask;
862
863 /* Usual case. Word 53 indicates word 64 is valid */
864 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
865 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
866 pio_mask <<= 3;
867 pio_mask |= 0x7;
868 } else {
869 /* If word 64 isn't valid then Word 51 high byte holds
870 * the PIO timing number for the maximum. Turn it into
871 * a mask.
872 */
46767aeb
AC
873 u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
874 if (mode < 5) /* Valid PIO range */
875 pio_mask = (2 << mode) - 1;
876 else
877 pio_mask = 1;
cb95d562
TH
878
879 /* But wait.. there's more. Design your standards by
880 * committee and you too can get a free iordy field to
881 * process. However its the speeds not the modes that
882 * are supported... Note drivers using the timing API
883 * will get this right anyway
884 */
885 }
886
887 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 888
b352e57d
AC
889 if (ata_id_is_cfa(id)) {
890 /*
891 * Process compact flash extended modes
892 */
893 int pio = id[163] & 0x7;
894 int dma = (id[163] >> 3) & 7;
895
896 if (pio)
897 pio_mask |= (1 << 5);
898 if (pio > 1)
899 pio_mask |= (1 << 6);
900 if (dma)
901 mwdma_mask |= (1 << 3);
902 if (dma > 1)
903 mwdma_mask |= (1 << 4);
904 }
905
fb21f0d0
TH
906 udma_mask = 0;
907 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
908 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
909
910 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
911}
912
86e45b6b
TH
913/**
914 * ata_port_queue_task - Queue port_task
915 * @ap: The ata_port to queue port_task for
e2a7f77a
RD
916 * @fn: workqueue function to be scheduled
917 * @data: data value to pass to workqueue function
918 * @delay: delay time for workqueue function
86e45b6b
TH
919 *
920 * Schedule @fn(@data) for execution after @delay jiffies using
921 * port_task. There is one port_task per port and it's the
922 * user(low level driver)'s responsibility to make sure that only
923 * one task is active at any given time.
924 *
925 * libata core layer takes care of synchronization between
926 * port_task and EH. ata_port_queue_task() may be ignored for EH
927 * synchronization.
928 *
929 * LOCKING:
930 * Inherited from caller.
931 */
932void ata_port_queue_task(struct ata_port *ap, void (*fn)(void *), void *data,
933 unsigned long delay)
934{
935 int rc;
936
b51e9e5d 937 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
86e45b6b
TH
938 return;
939
940 PREPARE_WORK(&ap->port_task, fn, data);
941
942 if (!delay)
943 rc = queue_work(ata_wq, &ap->port_task);
944 else
945 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
946
947 /* rc == 0 means that another user is using port task */
948 WARN_ON(rc == 0);
949}
950
951/**
952 * ata_port_flush_task - Flush port_task
953 * @ap: The ata_port to flush port_task for
954 *
955 * After this function completes, port_task is guranteed not to
956 * be running or scheduled.
957 *
958 * LOCKING:
959 * Kernel thread context (may sleep)
960 */
961void ata_port_flush_task(struct ata_port *ap)
962{
963 unsigned long flags;
964
965 DPRINTK("ENTER\n");
966
ba6a1308 967 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 968 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
ba6a1308 969 spin_unlock_irqrestore(ap->lock, flags);
86e45b6b
TH
970
971 DPRINTK("flush #1\n");
972 flush_workqueue(ata_wq);
973
974 /*
975 * At this point, if a task is running, it's guaranteed to see
976 * the FLUSH flag; thus, it will never queue pio tasks again.
977 * Cancel and flush.
978 */
979 if (!cancel_delayed_work(&ap->port_task)) {
0dd4b21f 980 if (ata_msg_ctl(ap))
88574551
TH
981 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
982 __FUNCTION__);
86e45b6b
TH
983 flush_workqueue(ata_wq);
984 }
985
ba6a1308 986 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 987 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
ba6a1308 988 spin_unlock_irqrestore(ap->lock, flags);
86e45b6b 989
0dd4b21f
BP
990 if (ata_msg_ctl(ap))
991 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
992}
993
77853bf2 994void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 995{
77853bf2 996 struct completion *waiting = qc->private_data;
a2a7a662 997
a2a7a662 998 complete(waiting);
a2a7a662
TH
999}
1000
1001/**
1002 * ata_exec_internal - execute libata internal command
a2a7a662
TH
1003 * @dev: Device to which the command is sent
1004 * @tf: Taskfile registers for the command and the result
d69cf37d 1005 * @cdb: CDB for packet command
a2a7a662
TH
1006 * @dma_dir: Data tranfer direction of the command
1007 * @buf: Data buffer of the command
1008 * @buflen: Length of data buffer
1009 *
1010 * Executes libata internal command with timeout. @tf contains
1011 * command on entry and result on return. Timeout and error
1012 * conditions are reported via return value. No recovery action
1013 * is taken after a command times out. It's caller's duty to
1014 * clean up after timeout.
1015 *
1016 * LOCKING:
1017 * None. Should be called with kernel context, might sleep.
551e8889
TH
1018 *
1019 * RETURNS:
1020 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1021 */
/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	the command on entry and the result taskfile on return.  The
 *	port lock is taken and released internally; callers must not
 *	hold ap->lock.  None of the internal state touched here
 *	(active_tag, sactive, qc_active) survives the call: it is
 *	saved, zeroed, and restored around the command.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;	/* saved for the timeout message */
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must never already be in flight */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear queueing state so the internal command runs
	 * with an otherwise idle-looking port; restored at "finish up"
	 */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;	/* we want the result taskfile back */
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		ata_sg_init_one(qc, buf, buflen);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* a failed qc without err_mask would confuse callers; make the
	 * failure explicit
	 */
	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				       "zero err_mask for failed "
				       "internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1162
977e6b9f
TH
1163/**
1164 * ata_do_simple_cmd - execute simple internal command
1165 * @dev: Device to which the command is sent
1166 * @cmd: Opcode to execute
1167 *
1168 * Execute a 'simple' command, that only consists of the opcode
1169 * 'cmd' itself, without filling any other registers
1170 *
1171 * LOCKING:
1172 * Kernel thread context (may sleep).
1173 *
1174 * RETURNS:
1175 * Zero on success, AC_ERR_* mask on failure
e58eb583 1176 */
77b08fb5 1177unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1178{
1179 struct ata_taskfile tf;
e58eb583
TH
1180
1181 ata_tf_init(dev, &tf);
1182
1183 tf.command = cmd;
1184 tf.flags |= ATA_TFLAG_DEVICE;
1185 tf.protocol = ATA_PROT_NODATA;
1186
977e6b9f 1187 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1188}
1189
1bc4ccff
AC
1190/**
1191 * ata_pio_need_iordy - check if iordy needed
1192 * @adev: ATA device
1193 *
1194 * Check if the current speed of the device requires IORDY. Used
1195 * by various controllers for chip configuration.
1196 */
1197
1198unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1199{
1200 int pio;
1201 int speed = adev->pio_mode - XFER_PIO_0;
1202
1203 if (speed < 2)
1204 return 0;
1205 if (speed > 2)
1206 return 1;
2e9edbf8 1207
1bc4ccff
AC
1208 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1209
1210 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1211 pio = adev->id[ATA_ID_EIDE_PIO];
1212 /* Is the speed faster than the drive allows non IORDY ? */
1213 if (pio) {
1214 /* This is cycle times not frequency - watch the logic! */
1215 if (pio > 240) /* PIO2 is 240nS per cycle */
1216 return 1;
1217 return 0;
1218 }
1219 }
1220 return 0;
1221}
1222
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.  -ENOENT is returned when
 *	polling detection (ATA_READID_DETECT) concludes no device is
 *	present.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;	/* human-readable cause, printed at err_out */
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY opcode matching the (expected) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* presence detection using polling IDENTIFY? */
	if (flags & ATA_READID_DETECT)
		tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		/* during detection, a NODEV hint is a clean "no device",
		 * not an I/O error */
		if ((flags & ATA_READID_DETECT) &&
		    (err_mask & AC_ERR_NODEV_HINT)) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the ID page must agree with the claimed class */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1343
3373efd8 1344static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1345{
3373efd8 1346 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1347}
1348
a6e6ce8e
TH
1349static void ata_dev_config_ncq(struct ata_device *dev,
1350 char *desc, size_t desc_sz)
1351{
1352 struct ata_port *ap = dev->ap;
1353 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1354
1355 if (!ata_id_has_ncq(dev->id)) {
1356 desc[0] = '\0';
1357 return;
1358 }
6919a0a6
AC
1359 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1360 snprintf(desc, desc_sz, "NCQ (not used)");
1361 return;
1362 }
a6e6ce8e 1363 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1364 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1365 dev->flags |= ATA_DFLAG_NCQ;
1366 }
1367
1368 if (hdepth >= ddepth)
1369 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1370 else
1371 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1372}
1373
e6d902a3
BK
1374static void ata_set_port_max_cmd_len(struct ata_port *ap)
1375{
1376 int i;
1377
cca3974e
JG
1378 if (ap->scsi_host) {
1379 unsigned int len = 0;
1380
e6d902a3 1381 for (i = 0; i < ATA_MAX_DEVICES; i++)
cca3974e
JG
1382 len = max(len, ap->device[i].cdb_len);
1383
1384 ap->scsi_host->max_cmd_len = len;
e6d902a3
BK
1385 }
1386}
1387
49016aca 1388/**
ffeae418 1389 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1390 * @dev: Target device to configure
1391 *
1392 * Configure @dev according to @dev->id. Generic and low-level
1393 * driver specific fixups are also applied.
49016aca
TH
1394 *
1395 * LOCKING:
ffeae418
TH
1396 * Kernel thread context (may sleep)
1397 *
1398 * RETURNS:
1399 * 0 on success, -errno otherwise
49016aca 1400 */
efdaedc4 1401int ata_dev_configure(struct ata_device *dev)
49016aca 1402{
3373efd8 1403 struct ata_port *ap = dev->ap;
efdaedc4 1404 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1405 const u16 *id = dev->id;
ff8854b2 1406 unsigned int xfer_mask;
b352e57d 1407 char revbuf[7]; /* XYZ-99\0 */
e6d902a3 1408 int rc;
49016aca 1409
0dd4b21f 1410 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
88574551
TH
1411 ata_dev_printk(dev, KERN_INFO,
1412 "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
1413 __FUNCTION__, ap->id, dev->devno);
ffeae418 1414 return 0;
49016aca
TH
1415 }
1416
0dd4b21f 1417 if (ata_msg_probe(ap))
88574551
TH
1418 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
1419 __FUNCTION__, ap->id, dev->devno);
1da177e4 1420
c39f5ebe 1421 /* print device capabilities */
0dd4b21f 1422 if (ata_msg_probe(ap))
88574551
TH
1423 ata_dev_printk(dev, KERN_DEBUG,
1424 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1425 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1426 __FUNCTION__,
f15a1daf
TH
1427 id[49], id[82], id[83], id[84],
1428 id[85], id[86], id[87], id[88]);
c39f5ebe 1429
208a9933 1430 /* initialize to-be-configured parameters */
ea1dd4e1 1431 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1432 dev->max_sectors = 0;
1433 dev->cdb_len = 0;
1434 dev->n_sectors = 0;
1435 dev->cylinders = 0;
1436 dev->heads = 0;
1437 dev->sectors = 0;
1438
1da177e4
LT
1439 /*
1440 * common ATA, ATAPI feature tests
1441 */
1442
ff8854b2 1443 /* find max transfer mode; for printk only */
1148c3a7 1444 xfer_mask = ata_id_xfermask(id);
1da177e4 1445
0dd4b21f
BP
1446 if (ata_msg_probe(ap))
1447 ata_dump_id(id);
1da177e4
LT
1448
1449 /* ATA-specific feature tests */
1450 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1451 if (ata_id_is_cfa(id)) {
1452 if (id[162] & 1) /* CPRM may make this media unusable */
1453 ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n",
1454 ap->id, dev->devno);
1455 snprintf(revbuf, 7, "CFA");
1456 }
1457 else
1458 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1459
1148c3a7 1460 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1461
1148c3a7 1462 if (ata_id_has_lba(id)) {
4c2d721a 1463 const char *lba_desc;
a6e6ce8e 1464 char ncq_desc[20];
8bf62ece 1465
4c2d721a
TH
1466 lba_desc = "LBA";
1467 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1468 if (ata_id_has_lba48(id)) {
8bf62ece 1469 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1470 lba_desc = "LBA48";
6fc49adb
TH
1471
1472 if (dev->n_sectors >= (1UL << 28) &&
1473 ata_id_has_flush_ext(id))
1474 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1475 }
8bf62ece 1476
a6e6ce8e
TH
1477 /* config NCQ */
1478 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1479
8bf62ece 1480 /* print device info to dmesg */
5afc8142 1481 if (ata_msg_drv(ap) && print_info)
b352e57d 1482 ata_dev_printk(dev, KERN_INFO, "%s, "
a6e6ce8e 1483 "max %s, %Lu sectors: %s %s\n",
b352e57d 1484 revbuf,
f15a1daf
TH
1485 ata_mode_string(xfer_mask),
1486 (unsigned long long)dev->n_sectors,
a6e6ce8e 1487 lba_desc, ncq_desc);
ffeae418 1488 } else {
8bf62ece
AL
1489 /* CHS */
1490
1491 /* Default translation */
1148c3a7
TH
1492 dev->cylinders = id[1];
1493 dev->heads = id[3];
1494 dev->sectors = id[6];
8bf62ece 1495
1148c3a7 1496 if (ata_id_current_chs_valid(id)) {
8bf62ece 1497 /* Current CHS translation is valid. */
1148c3a7
TH
1498 dev->cylinders = id[54];
1499 dev->heads = id[55];
1500 dev->sectors = id[56];
8bf62ece
AL
1501 }
1502
1503 /* print device info to dmesg */
5afc8142 1504 if (ata_msg_drv(ap) && print_info)
b352e57d 1505 ata_dev_printk(dev, KERN_INFO, "%s, "
f15a1daf 1506 "max %s, %Lu sectors: CHS %u/%u/%u\n",
b352e57d 1507 revbuf,
f15a1daf
TH
1508 ata_mode_string(xfer_mask),
1509 (unsigned long long)dev->n_sectors,
88574551
TH
1510 dev->cylinders, dev->heads,
1511 dev->sectors);
1da177e4
LT
1512 }
1513
07f6f7d0
AL
1514 if (dev->id[59] & 0x100) {
1515 dev->multi_count = dev->id[59] & 0xff;
5afc8142 1516 if (ata_msg_drv(ap) && print_info)
88574551
TH
1517 ata_dev_printk(dev, KERN_INFO,
1518 "ata%u: dev %u multi count %u\n",
1519 ap->id, dev->devno, dev->multi_count);
07f6f7d0
AL
1520 }
1521
6e7846e9 1522 dev->cdb_len = 16;
1da177e4
LT
1523 }
1524
1525 /* ATAPI-specific feature tests */
2c13b7ce 1526 else if (dev->class == ATA_DEV_ATAPI) {
08a556db
AL
1527 char *cdb_intr_string = "";
1528
1148c3a7 1529 rc = atapi_cdb_len(id);
1da177e4 1530 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 1531 if (ata_msg_warn(ap))
88574551
TH
1532 ata_dev_printk(dev, KERN_WARNING,
1533 "unsupported CDB len\n");
ffeae418 1534 rc = -EINVAL;
1da177e4
LT
1535 goto err_out_nosup;
1536 }
6e7846e9 1537 dev->cdb_len = (unsigned int) rc;
1da177e4 1538
08a556db 1539 if (ata_id_cdb_intr(dev->id)) {
312f7da2 1540 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
1541 cdb_intr_string = ", CDB intr";
1542 }
312f7da2 1543
1da177e4 1544 /* print device info to dmesg */
5afc8142 1545 if (ata_msg_drv(ap) && print_info)
12436c30
TH
1546 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
1547 ata_mode_string(xfer_mask),
1548 cdb_intr_string);
1da177e4
LT
1549 }
1550
914ed354
TH
1551 /* determine max_sectors */
1552 dev->max_sectors = ATA_MAX_SECTORS;
1553 if (dev->flags & ATA_DFLAG_LBA48)
1554 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
1555
93590859
AC
1556 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
1557 /* Let the user know. We don't want to disallow opens for
1558 rescue purposes, or in case the vendor is just a blithering
1559 idiot */
1560 if (print_info) {
1561 ata_dev_printk(dev, KERN_WARNING,
1562"Drive reports diagnostics failure. This may indicate a drive\n");
1563 ata_dev_printk(dev, KERN_WARNING,
1564"fault or invalid emulation. Contact drive vendor for information.\n");
1565 }
1566 }
1567
e6d902a3 1568 ata_set_port_max_cmd_len(ap);
6e7846e9 1569
4b2f3ede 1570 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 1571 if (ata_dev_knobble(dev)) {
5afc8142 1572 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
1573 ata_dev_printk(dev, KERN_INFO,
1574 "applying bridge limits\n");
5a529139 1575 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
1576 dev->max_sectors = ATA_MAX_SECTORS;
1577 }
1578
1579 if (ap->ops->dev_config)
1580 ap->ops->dev_config(ap, dev);
1581
0dd4b21f
BP
1582 if (ata_msg_probe(ap))
1583 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
1584 __FUNCTION__, ata_chk_status(ap));
ffeae418 1585 return 0;
1da177e4
LT
1586
1587err_out_nosup:
0dd4b21f 1588 if (ata_msg_probe(ap))
88574551
TH
1589 ata_dev_printk(dev, KERN_DEBUG,
1590 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 1591 return rc;
1da177e4
LT
1592}
1593
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.  Each device gets ATA_PROBE_MAX_TRIES attempts;
 *	failures reduce SATA link speed (-EIO) and/or the transfer
 *	mode before retrying, and a device that exhausts its tries is
 *	disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];	/* remaining attempts per device */
	int i, rc, down_xfermask;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	down_xfermask = 0;

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* trust the class from phy_reset only if the port
		 * survived the reset enabled */
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* devices out of tries keep class ATA_DEV_UNKNOWN and
		 * are skipped by the enabled check below */
		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc) {
		/* only mode-setting failures warrant lowering the
		 * transfer mode limit on retry */
		down_xfermask = 1;
		goto fail;
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	/* NOTE: dev points at the device that caused the failure here */
	switch (rc) {
	case -EINVAL:
	case -ENODEV:
		tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through */
	default:
		tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
			tries[dev->devno] = 0;
	}

	if (!tries[dev->devno]) {
		ata_down_xfermask_limit(dev, 1);
		ata_dev_disable(dev);
	}

	goto retry;
}
1708
1709/**
0cba632b
JG
1710 * ata_port_probe - Mark port as enabled
1711 * @ap: Port for which we indicate enablement
1da177e4 1712 *
0cba632b
JG
1713 * Modify @ap data structure such that the system
1714 * thinks that the entire port is enabled.
1715 *
cca3974e 1716 * LOCKING: host lock, or some other form of
0cba632b 1717 * serialization.
1da177e4
LT
1718 */
1719
1720void ata_port_probe(struct ata_port *ap)
1721{
198e0fed 1722 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1723}
1724
3be680b7
TH
1725/**
1726 * sata_print_link_status - Print SATA link status
1727 * @ap: SATA port to printk link status about
1728 *
1729 * This function prints link speed and status of a SATA link.
1730 *
1731 * LOCKING:
1732 * None.
1733 */
1734static void sata_print_link_status(struct ata_port *ap)
1735{
6d5f9732 1736 u32 sstatus, scontrol, tmp;
3be680b7 1737
81952c54 1738 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1739 return;
81952c54 1740 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1741
81952c54 1742 if (ata_port_online(ap)) {
3be680b7 1743 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1744 ata_port_printk(ap, KERN_INFO,
1745 "SATA link up %s (SStatus %X SControl %X)\n",
1746 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1747 } else {
f15a1daf
TH
1748 ata_port_printk(ap, KERN_INFO,
1749 "SATA link down (SStatus %X SControl %X)\n",
1750 sstatus, scontrol);
3be680b7
TH
1751 }
1752}
1753
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.  On success the port is marked
 *	probed and @ap->cbl is set to ATA_CBL_SATA; otherwise the
 *	port is disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);	/* 5 s PHY-ready budget */

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset: SControl DET=1 requests a
		 * COMRESET, IPM bits keep power management disabled */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset: DET back to 0 releases the reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary: DET==1 means
	 * device presence detected but no communication yet */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device behind the link to come out of BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
1808
1809/**
780a87f7
JG
1810 * sata_phy_reset - Reset SATA bus.
1811 * @ap: SATA port associated with target SATA PHY.
1da177e4 1812 *
780a87f7
JG
1813 * This function resets the SATA bus, and then probes
1814 * the bus for devices.
1da177e4
LT
1815 *
1816 * LOCKING:
0cba632b 1817 * PCI/etc. bus probe sem.
1da177e4
LT
1818 *
1819 */
1820void sata_phy_reset(struct ata_port *ap)
1821{
1822 __sata_phy_reset(ap);
198e0fed 1823 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1824 return;
1825 ata_bus_reset(ap);
1826}
1827
ebdfca6e
AC
1828/**
1829 * ata_dev_pair - return other device on cable
ebdfca6e
AC
1830 * @adev: device
1831 *
1832 * Obtain the other device on the same cable, or if none is
1833 * present NULL is returned
1834 */
2e9edbf8 1835
3373efd8 1836struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 1837{
3373efd8 1838 struct ata_port *ap = adev->ap;
ebdfca6e 1839 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 1840 if (!ata_dev_enabled(pair))
ebdfca6e
AC
1841 return NULL;
1842 return pair;
1843}
1844
1da177e4 1845/**
780a87f7
JG
1846 * ata_port_disable - Disable port.
1847 * @ap: Port to be disabled.
1da177e4 1848 *
780a87f7
JG
1849 * Modify @ap data structure such that the system
1850 * thinks that the entire port is disabled, and should
1851 * never attempt to probe or communicate with devices
1852 * on this port.
1853 *
cca3974e 1854 * LOCKING: host lock, or some other form of
780a87f7 1855 * serialization.
1da177e4
LT
1856 */
1857
1858void ata_port_disable(struct ata_port *ap)
1859{
1860 ap->device[0].class = ATA_DEV_NONE;
1861 ap->device[1].class = ATA_DEV_NONE;
198e0fed 1862 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
1863}
1864
1c3fae4d 1865/**
3c567b7d 1866 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
1867 * @ap: Port to adjust SATA spd limit for
1868 *
1869 * Adjust SATA spd limit of @ap downward. Note that this
1870 * function only adjusts the limit. The change must be applied
3c567b7d 1871 * using sata_set_spd().
1c3fae4d
TH
1872 *
1873 * LOCKING:
1874 * Inherited from caller.
1875 *
1876 * RETURNS:
1877 * 0 on success, negative errno on failure
1878 */
3c567b7d 1879int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 1880{
81952c54
TH
1881 u32 sstatus, spd, mask;
1882 int rc, highbit;
1c3fae4d 1883
81952c54
TH
1884 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
1885 if (rc)
1886 return rc;
1c3fae4d
TH
1887
1888 mask = ap->sata_spd_limit;
1889 if (mask <= 1)
1890 return -EINVAL;
1891 highbit = fls(mask) - 1;
1892 mask &= ~(1 << highbit);
1893
81952c54 1894 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
1895 if (spd <= 1)
1896 return -EINVAL;
1897 spd--;
1898 mask &= (1 << spd) - 1;
1899 if (!mask)
1900 return -EINVAL;
1901
1902 ap->sata_spd_limit = mask;
1903
f15a1daf
TH
1904 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
1905 sata_spd_string(fls(mask)));
1c3fae4d
TH
1906
1907 return 0;
1908}
1909
3c567b7d 1910static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
1911{
1912 u32 spd, limit;
1913
1914 if (ap->sata_spd_limit == UINT_MAX)
1915 limit = 0;
1916 else
1917 limit = fls(ap->sata_spd_limit);
1918
1919 spd = (*scontrol >> 4) & 0xf;
1920 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
1921
1922 return spd != limit;
1923}
1924
1925/**
3c567b7d 1926 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
1927 * @ap: Port in question
1928 *
1929 * Test whether the spd limit in SControl matches
1930 * @ap->sata_spd_limit. This function is used to determine
1931 * whether hardreset is necessary to apply SATA spd
1932 * configuration.
1933 *
1934 * LOCKING:
1935 * Inherited from caller.
1936 *
1937 * RETURNS:
1938 * 1 if SATA spd configuration is needed, 0 otherwise.
1939 */
3c567b7d 1940int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
1941{
1942 u32 scontrol;
1943
81952c54 1944 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
1945 return 0;
1946
3c567b7d 1947 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
1948}
1949
1950/**
3c567b7d 1951 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
1952 * @ap: Port to set SATA spd for
1953 *
1954 * Set SATA spd of @ap according to sata_spd_limit.
1955 *
1956 * LOCKING:
1957 * Inherited from caller.
1958 *
1959 * RETURNS:
1960 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 1961 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 1962 */
3c567b7d 1963int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
1964{
1965 u32 scontrol;
81952c54 1966 int rc;
1c3fae4d 1967
81952c54
TH
1968 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
1969 return rc;
1c3fae4d 1970
3c567b7d 1971 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
1972 return 0;
1973
81952c54
TH
1974 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
1975 return rc;
1976
1c3fae4d
TH
1977 return 1;
1978}
1979
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 *
 * Fields per entry: mode, setup, act8b, rec8b, cyc8b, active,
 * recover, cycle, udma -- all in ns; 0 means "not applicable".
 * The table is terminated by the { 0xFF } sentinel entry.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

/* ENOUGH(v, unit): ceil(v / unit) for v > 0.
 * EZ(v, unit): same, but preserves 0 as "not specified". */
#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

/* Convert nanosecond timings in *t to bus clock counts in *q, rounding
 * up.  T is the bus clock period and UT the UDMA clock period, both in
 * picoseconds (hence the * 1000 on the ns values).
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2043
2044void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2045 struct ata_timing *m, unsigned int what)
2046{
2047 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2048 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2049 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2050 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2051 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2052 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2053 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2054 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2055}
2056
2057static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2058{
2059 const struct ata_timing *t;
2060
2061 for (t = ata_timing; t->mode != speed; t++)
91190758 2062 if (t->mode == 0xFF)
452503f9 2063 return NULL;
2e9edbf8 2064 return t;
452503f9
AC
2065}
2066
/* Compute the bus-clock timing for @adev at transfer mode @speed into
 * *t.  T and UT are the bus and UDMA clock periods passed through to
 * ata_timing_quantize().  Starts from the ata_timing table entry,
 * widens it with the drive's own EIDE cycle-time claims, quantizes to
 * clock counts, merges with the PIO timing for DMA modes, and finally
 * stretches active/recover so the totals meet the cycle times.
 * Returns 0 on success, -EINVAL if @speed has no table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the slower of table and drive-reported cycle times */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_4) {
		/* recursive call computes the current PIO mode's timing */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
2131
cf176e1a
TH
2132/**
2133 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a
TH
2134 * @dev: Device to adjust xfer masks
2135 * @force_pio0: Force PIO0
2136 *
2137 * Adjust xfer masks of @dev downward. Note that this function
2138 * does not apply the change. Invoking ata_set_mode() afterwards
2139 * will apply the limit.
2140 *
2141 * LOCKING:
2142 * Inherited from caller.
2143 *
2144 * RETURNS:
2145 * 0 on success, negative errno on failure
2146 */
3373efd8 2147int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
cf176e1a
TH
2148{
2149 unsigned long xfer_mask;
2150 int highbit;
2151
2152 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2153 dev->udma_mask);
2154
2155 if (!xfer_mask)
2156 goto fail;
2157 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2158 if (xfer_mask & ATA_MASK_UDMA)
2159 xfer_mask &= ~ATA_MASK_MWDMA;
2160
2161 highbit = fls(xfer_mask) - 1;
2162 xfer_mask &= ~(1 << highbit);
2163 if (force_pio0)
2164 xfer_mask &= 1 << ATA_SHIFT_PIO;
2165 if (!xfer_mask)
2166 goto fail;
2167
2168 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2169 &dev->udma_mask);
2170
f15a1daf
TH
2171 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2172 ata_mode_string(xfer_mask));
cf176e1a
TH
2173
2174 return 0;
2175
2176 fail:
2177 return -EINVAL;
2178}
2179
3373efd8 2180static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2181{
baa1e78a 2182 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2183 unsigned int err_mask;
2184 int rc;
1da177e4 2185
e8384607 2186 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2187 if (dev->xfer_shift == ATA_SHIFT_PIO)
2188 dev->flags |= ATA_DFLAG_PIO;
2189
3373efd8 2190 err_mask = ata_dev_set_xfermode(dev);
83206a29 2191 if (err_mask) {
f15a1daf
TH
2192 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2193 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2194 return -EIO;
2195 }
1da177e4 2196
baa1e78a 2197 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2198 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2199 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2200 if (rc)
83206a29 2201 return rc;
48a8a14f 2202
23e71c3d
TH
2203 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2204 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2205
f15a1daf
TH
2206 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2207 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2208 return 0;
1da177e4
LT
2209}
2210
1da177e4
LT
2211/**
2212 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2213 * @ap: port on which timings will be programmed
e82cbdb9 2214 * @r_failed_dev: out paramter for failed device
1da177e4 2215 *
e82cbdb9
TH
2216 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2217 * ata_set_mode() fails, pointer to the failing device is
2218 * returned in @r_failed_dev.
780a87f7 2219 *
1da177e4 2220 * LOCKING:
0cba632b 2221 * PCI/etc. bus probe sem.
e82cbdb9
TH
2222 *
2223 * RETURNS:
2224 * 0 on success, negative errno otherwise
1da177e4 2225 */
1ad8e7f9 2226int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2227{
e8e0619f 2228 struct ata_device *dev;
e82cbdb9 2229 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2230
3adcebb2
TH
2231 /* has private set_mode? */
2232 if (ap->ops->set_mode) {
2233 /* FIXME: make ->set_mode handle no device case and
2234 * return error code and failing device on failure.
2235 */
2236 for (i = 0; i < ATA_MAX_DEVICES; i++) {
02670bf3 2237 if (ata_dev_ready(&ap->device[i])) {
3adcebb2
TH
2238 ap->ops->set_mode(ap);
2239 break;
2240 }
2241 }
2242 return 0;
2243 }
2244
a6d5a51c
TH
2245 /* step 1: calculate xfer_mask */
2246 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2247 unsigned int pio_mask, dma_mask;
a6d5a51c 2248
e8e0619f
TH
2249 dev = &ap->device[i];
2250
e1211e3f 2251 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2252 continue;
2253
3373efd8 2254 ata_dev_xfermask(dev);
1da177e4 2255
acf356b1
TH
2256 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2257 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2258 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2259 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2260
4f65977d 2261 found = 1;
5444a6f4
AC
2262 if (dev->dma_mode)
2263 used_dma = 1;
a6d5a51c 2264 }
4f65977d 2265 if (!found)
e82cbdb9 2266 goto out;
a6d5a51c
TH
2267
2268 /* step 2: always set host PIO timings */
e8e0619f
TH
2269 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2270 dev = &ap->device[i];
2271 if (!ata_dev_enabled(dev))
2272 continue;
2273
2274 if (!dev->pio_mode) {
f15a1daf 2275 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2276 rc = -EINVAL;
e82cbdb9 2277 goto out;
e8e0619f
TH
2278 }
2279
2280 dev->xfer_mode = dev->pio_mode;
2281 dev->xfer_shift = ATA_SHIFT_PIO;
2282 if (ap->ops->set_piomode)
2283 ap->ops->set_piomode(ap, dev);
2284 }
1da177e4 2285
a6d5a51c 2286 /* step 3: set host DMA timings */
e8e0619f
TH
2287 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2288 dev = &ap->device[i];
2289
2290 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2291 continue;
2292
2293 dev->xfer_mode = dev->dma_mode;
2294 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2295 if (ap->ops->set_dmamode)
2296 ap->ops->set_dmamode(ap, dev);
2297 }
1da177e4
LT
2298
2299 /* step 4: update devices' xfer mode */
83206a29 2300 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2301 dev = &ap->device[i];
1da177e4 2302
02670bf3
TH
2303 /* don't udpate suspended devices' xfer mode */
2304 if (!ata_dev_ready(dev))
83206a29
TH
2305 continue;
2306
3373efd8 2307 rc = ata_dev_set_mode(dev);
5bbc53f4 2308 if (rc)
e82cbdb9 2309 goto out;
83206a29 2310 }
1da177e4 2311
e8e0619f
TH
2312 /* Record simplex status. If we selected DMA then the other
2313 * host channels are not permitted to do so.
5444a6f4 2314 */
cca3974e
JG
2315 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2316 ap->host->simplex_claimed = 1;
5444a6f4 2317
e8e0619f 2318 /* step5: chip specific finalisation */
1da177e4
LT
2319 if (ap->ops->post_set_mode)
2320 ap->ops->post_set_mode(ap);
2321
e82cbdb9
TH
2322 out:
2323 if (rc)
2324 *r_failed_dev = dev;
2325 return rc;
1da177e4
LT
2326}
2327
1fdffbce
JG
2328/**
2329 * ata_tf_to_host - issue ATA taskfile to host controller
2330 * @ap: port to which command is being issued
2331 * @tf: ATA taskfile register set
2332 *
2333 * Issues ATA taskfile register set to ATA host controller,
2334 * with proper synchronization with interrupt handler and
2335 * other threads.
2336 *
2337 * LOCKING:
cca3974e 2338 * spin_lock_irqsave(host lock)
1fdffbce
JG
2339 */
2340
2341static inline void ata_tf_to_host(struct ata_port *ap,
2342 const struct ata_taskfile *tf)
2343{
2344 ap->ops->tf_load(ap, tf);
2345 ap->ops->exec_command(ap, tf);
2346}
2347
1da177e4
LT
2348/**
2349 * ata_busy_sleep - sleep until BSY clears, or timeout
2350 * @ap: port containing status register to be polled
2351 * @tmout_pat: impatience timeout
2352 * @tmout: overall timeout
2353 *
780a87f7
JG
2354 * Sleep until ATA Status register bit BSY clears,
2355 * or a timeout occurs.
2356 *
d1adc1bb
TH
2357 * LOCKING:
2358 * Kernel thread context (may sleep).
2359 *
2360 * RETURNS:
2361 * 0 on success, -errno otherwise.
1da177e4 2362 */
d1adc1bb
TH
2363int ata_busy_sleep(struct ata_port *ap,
2364 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2365{
2366 unsigned long timer_start, timeout;
2367 u8 status;
2368
2369 status = ata_busy_wait(ap, ATA_BUSY, 300);
2370 timer_start = jiffies;
2371 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2372 while (status != 0xff && (status & ATA_BUSY) &&
2373 time_before(jiffies, timeout)) {
1da177e4
LT
2374 msleep(50);
2375 status = ata_busy_wait(ap, ATA_BUSY, 3);
2376 }
2377
d1adc1bb 2378 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2379 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2380 "port is slow to respond, please be patient "
2381 "(Status 0x%x)\n", status);
1da177e4
LT
2382
2383 timeout = timer_start + tmout;
d1adc1bb
TH
2384 while (status != 0xff && (status & ATA_BUSY) &&
2385 time_before(jiffies, timeout)) {
1da177e4
LT
2386 msleep(50);
2387 status = ata_chk_status(ap);
2388 }
2389
d1adc1bb
TH
2390 if (status == 0xff)
2391 return -ENODEV;
2392
1da177e4 2393 if (status & ATA_BUSY) {
f15a1daf 2394 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2395 "(%lu secs, Status 0x%x)\n",
2396 tmout / HZ, status);
d1adc1bb 2397 return -EBUSY;
1da177e4
LT
2398 }
2399
2400 return 0;
2401}
2402
2403static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2404{
2405 struct ata_ioports *ioaddr = &ap->ioaddr;
2406 unsigned int dev0 = devmask & (1 << 0);
2407 unsigned int dev1 = devmask & (1 << 1);
2408 unsigned long timeout;
2409
2410 /* if device 0 was found in ata_devchk, wait for its
2411 * BSY bit to clear
2412 */
2413 if (dev0)
2414 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2415
2416 /* if device 1 was found in ata_devchk, wait for
2417 * register access, then wait for BSY to clear
2418 */
2419 timeout = jiffies + ATA_TMOUT_BOOT;
2420 while (dev1) {
2421 u8 nsect, lbal;
2422
2423 ap->ops->dev_select(ap, 1);
2424 if (ap->flags & ATA_FLAG_MMIO) {
2425 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2426 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2427 } else {
2428 nsect = inb(ioaddr->nsect_addr);
2429 lbal = inb(ioaddr->lbal_addr);
2430 }
2431 if ((nsect == 1) && (lbal == 1))
2432 break;
2433 if (time_after(jiffies, timeout)) {
2434 dev1 = 0;
2435 break;
2436 }
2437 msleep(50); /* give drive a breather */
2438 }
2439 if (dev1)
2440 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2441
2442 /* is all this really necessary? */
2443 ap->ops->dev_select(ap, 0);
2444 if (dev1)
2445 ap->ops->dev_select(ap, 1);
2446 if (dev0)
2447 ap->ops->dev_select(ap, 0);
2448}
2449
1da177e4
LT
2450static unsigned int ata_bus_softreset(struct ata_port *ap,
2451 unsigned int devmask)
2452{
2453 struct ata_ioports *ioaddr = &ap->ioaddr;
2454
2455 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2456
2457 /* software reset. causes dev0 to be selected */
2458 if (ap->flags & ATA_FLAG_MMIO) {
2459 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2460 udelay(20); /* FIXME: flush */
2461 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2462 udelay(20); /* FIXME: flush */
2463 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2464 } else {
2465 outb(ap->ctl, ioaddr->ctl_addr);
2466 udelay(10);
2467 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2468 udelay(10);
2469 outb(ap->ctl, ioaddr->ctl_addr);
2470 }
2471
2472 /* spec mandates ">= 2ms" before checking status.
2473 * We wait 150ms, because that was the magic delay used for
2474 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2475 * between when the ATA command register is written, and then
2476 * status is checked. Because waiting for "a while" before
2477 * checking status is fine, post SRST, we perform this magic
2478 * delay here as well.
09c7ad79
AC
2479 *
2480 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
2481 */
2482 msleep(150);
2483
2e9edbf8 2484 /* Before we perform post reset processing we want to see if
298a41ca
TH
2485 * the bus shows 0xFF because the odd clown forgets the D7
2486 * pulldown resistor.
2487 */
d1adc1bb
TH
2488 if (ata_check_status(ap) == 0xFF)
2489 return 0;
09c7ad79 2490
1da177e4
LT
2491 ata_bus_post_reset(ap, devmask);
2492
2493 return 0;
2494}
2495
2496/**
2497 * ata_bus_reset - reset host port and associated ATA channel
2498 * @ap: port to reset
2499 *
2500 * This is typically the first time we actually start issuing
2501 * commands to the ATA channel. We wait for BSY to clear, then
2502 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2503 * result. Determine what devices, if any, are on the channel
2504 * by looking at the device 0/1 error register. Look at the signature
2505 * stored in each device's taskfile registers, to determine if
2506 * the device is ATA or ATAPI.
2507 *
2508 * LOCKING:
0cba632b 2509 * PCI/etc. bus probe sem.
cca3974e 2510 * Obtains host lock.
1da177e4
LT
2511 *
2512 * SIDE EFFECTS:
198e0fed 2513 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2514 */
2515
2516void ata_bus_reset(struct ata_port *ap)
2517{
2518 struct ata_ioports *ioaddr = &ap->ioaddr;
2519 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2520 u8 err;
aec5c3c1 2521 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4
LT
2522
2523 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2524
2525 /* determine if device 0/1 are present */
2526 if (ap->flags & ATA_FLAG_SATA_RESET)
2527 dev0 = 1;
2528 else {
2529 dev0 = ata_devchk(ap, 0);
2530 if (slave_possible)
2531 dev1 = ata_devchk(ap, 1);
2532 }
2533
2534 if (dev0)
2535 devmask |= (1 << 0);
2536 if (dev1)
2537 devmask |= (1 << 1);
2538
2539 /* select device 0 again */
2540 ap->ops->dev_select(ap, 0);
2541
2542 /* issue bus reset */
2543 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
2544 if (ata_bus_softreset(ap, devmask))
2545 goto err_out;
1da177e4
LT
2546
2547 /*
2548 * determine by signature whether we have ATA or ATAPI devices
2549 */
b4dc7623 2550 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 2551 if ((slave_possible) && (err != 0x81))
b4dc7623 2552 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
2553
2554 /* re-enable interrupts */
2555 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2556 ata_irq_on(ap);
2557
2558 /* is double-select really necessary? */
2559 if (ap->device[1].class != ATA_DEV_NONE)
2560 ap->ops->dev_select(ap, 1);
2561 if (ap->device[0].class != ATA_DEV_NONE)
2562 ap->ops->dev_select(ap, 0);
2563
2564 /* if no devices were detected, disable this port */
2565 if ((ap->device[0].class == ATA_DEV_NONE) &&
2566 (ap->device[1].class == ATA_DEV_NONE))
2567 goto err_out;
2568
2569 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2570 /* set up device control for ATA_FLAG_SATA_RESET */
2571 if (ap->flags & ATA_FLAG_MMIO)
2572 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2573 else
2574 outb(ap->ctl, ioaddr->ctl_addr);
2575 }
2576
2577 DPRINTK("EXIT\n");
2578 return;
2579
2580err_out:
f15a1daf 2581 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
2582 ap->ops->port_disable(ap);
2583
2584 DPRINTK("EXIT\n");
2585}
2586
d7bb4cc7
TH
2587/**
2588 * sata_phy_debounce - debounce SATA phy status
2589 * @ap: ATA port to debounce SATA phy status for
2590 * @params: timing parameters { interval, duratinon, timeout } in msec
2591 *
2592 * Make sure SStatus of @ap reaches stable state, determined by
2593 * holding the same value where DET is not 1 for @duration polled
2594 * every @interval, before @timeout. Timeout constraints the
2595 * beginning of the stable state. Because, after hot unplugging,
2596 * DET gets stuck at 1 on some controllers, this functions waits
2597 * until timeout then returns 0 if DET is stable at 1.
2598 *
2599 * LOCKING:
2600 * Kernel thread context (may sleep)
2601 *
2602 * RETURNS:
2603 * 0 on success, -errno on failure.
2604 */
2605int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2606{
d7bb4cc7
TH
2607 unsigned long interval_msec = params[0];
2608 unsigned long duration = params[1] * HZ / 1000;
2609 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2610 unsigned long last_jiffies;
2611 u32 last, cur;
2612 int rc;
2613
2614 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2615 return rc;
2616 cur &= 0xf;
2617
2618 last = cur;
2619 last_jiffies = jiffies;
2620
2621 while (1) {
2622 msleep(interval_msec);
2623 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2624 return rc;
2625 cur &= 0xf;
2626
2627 /* DET stable? */
2628 if (cur == last) {
2629 if (cur == 1 && time_before(jiffies, timeout))
2630 continue;
2631 if (time_after(jiffies, last_jiffies + duration))
2632 return 0;
2633 continue;
2634 }
2635
2636 /* unstable, start over */
2637 last = cur;
2638 last_jiffies = jiffies;
2639
2640 /* check timeout */
2641 if (time_after(jiffies, timeout))
2642 return -EBUSY;
2643 }
2644}
2645
2646/**
2647 * sata_phy_resume - resume SATA phy
2648 * @ap: ATA port to resume SATA phy for
2649 * @params: timing parameters { interval, duratinon, timeout } in msec
2650 *
2651 * Resume SATA phy of @ap and debounce it.
2652 *
2653 * LOCKING:
2654 * Kernel thread context (may sleep)
2655 *
2656 * RETURNS:
2657 * 0 on success, -errno on failure.
2658 */
2659int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2660{
2661 u32 scontrol;
81952c54
TH
2662 int rc;
2663
2664 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2665 return rc;
7a7921e8 2666
852ee16a 2667 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2668
2669 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2670 return rc;
7a7921e8 2671
d7bb4cc7
TH
2672 /* Some PHYs react badly if SStatus is pounded immediately
2673 * after resuming. Delay 200ms before debouncing.
2674 */
2675 msleep(200);
7a7921e8 2676
d7bb4cc7 2677 return sata_phy_debounce(ap, params);
7a7921e8
TH
2678}
2679
f5914a46
TH
2680static void ata_wait_spinup(struct ata_port *ap)
2681{
2682 struct ata_eh_context *ehc = &ap->eh_context;
2683 unsigned long end, secs;
2684 int rc;
2685
2686 /* first, debounce phy if SATA */
2687 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 2688 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
2689
2690 /* if debounced successfully and offline, no need to wait */
2691 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2692 return;
2693 }
2694
2695 /* okay, let's give the drive time to spin up */
2696 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2697 secs = ((end - jiffies) + HZ - 1) / HZ;
2698
2699 if (time_after(jiffies, end))
2700 return;
2701
2702 if (secs > 5)
2703 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2704 "(%lu secs)\n", secs);
2705
2706 schedule_timeout_uninterruptible(end - jiffies);
2707}
2708
2709/**
2710 * ata_std_prereset - prepare for reset
2711 * @ap: ATA port to be reset
2712 *
2713 * @ap is about to be reset. Initialize it.
2714 *
2715 * LOCKING:
2716 * Kernel thread context (may sleep)
2717 *
2718 * RETURNS:
2719 * 0 on success, -errno otherwise.
2720 */
2721int ata_std_prereset(struct ata_port *ap)
2722{
2723 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 2724 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
2725 int rc;
2726
28324304
TH
2727 /* handle link resume & hotplug spinup */
2728 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2729 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2730 ehc->i.action |= ATA_EH_HARDRESET;
2731
2732 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2733 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2734 ata_wait_spinup(ap);
f5914a46
TH
2735
2736 /* if we're about to do hardreset, nothing more to do */
2737 if (ehc->i.action & ATA_EH_HARDRESET)
2738 return 0;
2739
2740 /* if SATA, resume phy */
2741 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
2742 rc = sata_phy_resume(ap, timing);
2743 if (rc && rc != -EOPNOTSUPP) {
2744 /* phy resume failed */
2745 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2746 "link for reset (errno=%d)\n", rc);
2747 return rc;
2748 }
2749 }
2750
2751 /* Wait for !BSY if the controller can wait for the first D2H
2752 * Reg FIS and we don't know that no device is attached.
2753 */
2754 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2755 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2756
2757 return 0;
2758}
2759
c2bd5804
TH
2760/**
2761 * ata_std_softreset - reset host port via ATA SRST
2762 * @ap: port to reset
c2bd5804
TH
2763 * @classes: resulting classes of attached devices
2764 *
52783c5d 2765 * Reset host port using ATA SRST.
c2bd5804
TH
2766 *
2767 * LOCKING:
2768 * Kernel thread context (may sleep)
2769 *
2770 * RETURNS:
2771 * 0 on success, -errno otherwise.
2772 */
2bf2cb26 2773int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
2774{
2775 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2776 unsigned int devmask = 0, err_mask;
2777 u8 err;
2778
2779 DPRINTK("ENTER\n");
2780
81952c54 2781 if (ata_port_offline(ap)) {
3a39746a
TH
2782 classes[0] = ATA_DEV_NONE;
2783 goto out;
2784 }
2785
c2bd5804
TH
2786 /* determine if device 0/1 are present */
2787 if (ata_devchk(ap, 0))
2788 devmask |= (1 << 0);
2789 if (slave_possible && ata_devchk(ap, 1))
2790 devmask |= (1 << 1);
2791
c2bd5804
TH
2792 /* select device 0 again */
2793 ap->ops->dev_select(ap, 0);
2794
2795 /* issue bus reset */
2796 DPRINTK("about to softreset, devmask=%x\n", devmask);
2797 err_mask = ata_bus_softreset(ap, devmask);
2798 if (err_mask) {
f15a1daf
TH
2799 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2800 err_mask);
c2bd5804
TH
2801 return -EIO;
2802 }
2803
2804 /* determine by signature whether we have ATA or ATAPI devices */
2805 classes[0] = ata_dev_try_classify(ap, 0, &err);
2806 if (slave_possible && err != 0x81)
2807 classes[1] = ata_dev_try_classify(ap, 1, &err);
2808
3a39746a 2809 out:
c2bd5804
TH
2810 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2811 return 0;
2812}
2813
2814/**
b6103f6d 2815 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 2816 * @ap: port to reset
b6103f6d 2817 * @timing: timing parameters { interval, duratinon, timeout } in msec
c2bd5804
TH
2818 *
2819 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
2820 *
2821 * LOCKING:
2822 * Kernel thread context (may sleep)
2823 *
2824 * RETURNS:
2825 * 0 on success, -errno otherwise.
2826 */
b6103f6d 2827int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 2828{
852ee16a 2829 u32 scontrol;
81952c54 2830 int rc;
852ee16a 2831
c2bd5804
TH
2832 DPRINTK("ENTER\n");
2833
3c567b7d 2834 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
2835 /* SATA spec says nothing about how to reconfigure
2836 * spd. To be on the safe side, turn off phy during
2837 * reconfiguration. This works for at least ICH7 AHCI
2838 * and Sil3124.
2839 */
81952c54 2840 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 2841 goto out;
81952c54 2842
a34b6fc0 2843 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
2844
2845 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 2846 goto out;
1c3fae4d 2847
3c567b7d 2848 sata_set_spd(ap);
1c3fae4d
TH
2849 }
2850
2851 /* issue phy wake/reset */
81952c54 2852 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 2853 goto out;
81952c54 2854
852ee16a 2855 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
2856
2857 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 2858 goto out;
c2bd5804 2859
1c3fae4d 2860 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
2861 * 10.4.2 says at least 1 ms.
2862 */
2863 msleep(1);
2864
1c3fae4d 2865 /* bring phy back */
b6103f6d
TH
2866 rc = sata_phy_resume(ap, timing);
2867 out:
2868 DPRINTK("EXIT, rc=%d\n", rc);
2869 return rc;
2870}
2871
2872/**
2873 * sata_std_hardreset - reset host port via SATA phy reset
2874 * @ap: port to reset
2875 * @class: resulting class of attached device
2876 *
2877 * SATA phy-reset host port using DET bits of SControl register,
2878 * wait for !BSY and classify the attached device.
2879 *
2880 * LOCKING:
2881 * Kernel thread context (may sleep)
2882 *
2883 * RETURNS:
2884 * 0 on success, -errno otherwise.
2885 */
2886int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
2887{
2888 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
2889 int rc;
2890
2891 DPRINTK("ENTER\n");
2892
2893 /* do hardreset */
2894 rc = sata_port_hardreset(ap, timing);
2895 if (rc) {
2896 ata_port_printk(ap, KERN_ERR,
2897 "COMRESET failed (errno=%d)\n", rc);
2898 return rc;
2899 }
c2bd5804 2900
c2bd5804 2901 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 2902 if (ata_port_offline(ap)) {
c2bd5804
TH
2903 *class = ATA_DEV_NONE;
2904 DPRINTK("EXIT, link offline\n");
2905 return 0;
2906 }
2907
2908 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
2909 ata_port_printk(ap, KERN_ERR,
2910 "COMRESET failed (device not ready)\n");
c2bd5804
TH
2911 return -EIO;
2912 }
2913
3a39746a
TH
2914 ap->ops->dev_select(ap, 0); /* probably unnecessary */
2915
c2bd5804
TH
2916 *class = ata_dev_try_classify(ap, 0, NULL);
2917
2918 DPRINTK("EXIT, class=%u\n", *class);
2919 return 0;
2920}
2921
2922/**
2923 * ata_std_postreset - standard postreset callback
2924 * @ap: the target ata_port
2925 * @classes: classes of attached devices
2926 *
2927 * This function is invoked after a successful reset. Note that
2928 * the device might have been reset more than once using
2929 * different reset methods before postreset is invoked.
c2bd5804 2930 *
c2bd5804
TH
2931 * LOCKING:
2932 * Kernel thread context (may sleep)
2933 */
2934void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
2935{
dc2b3515
TH
2936 u32 serror;
2937
c2bd5804
TH
2938 DPRINTK("ENTER\n");
2939
c2bd5804 2940 /* print link status */
81952c54 2941 sata_print_link_status(ap);
c2bd5804 2942
dc2b3515
TH
2943 /* clear SError */
2944 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
2945 sata_scr_write(ap, SCR_ERROR, serror);
2946
3a39746a 2947 /* re-enable interrupts */
e3180499
TH
2948 if (!ap->ops->error_handler) {
2949 /* FIXME: hack. create a hook instead */
2950 if (ap->ioaddr.ctl_addr)
2951 ata_irq_on(ap);
2952 }
c2bd5804
TH
2953
2954 /* is double-select really necessary? */
2955 if (classes[0] != ATA_DEV_NONE)
2956 ap->ops->dev_select(ap, 1);
2957 if (classes[1] != ATA_DEV_NONE)
2958 ap->ops->dev_select(ap, 0);
2959
3a39746a
TH
2960 /* bail out if no device is present */
2961 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
2962 DPRINTK("EXIT, no device\n");
2963 return;
2964 }
2965
2966 /* set up device control */
2967 if (ap->ioaddr.ctl_addr) {
2968 if (ap->flags & ATA_FLAG_MMIO)
2969 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
2970 else
2971 outb(ap->ctl, ap->ioaddr.ctl_addr);
2972 }
c2bd5804
TH
2973
2974 DPRINTK("EXIT\n");
2975}
2976
623a3128
TH
2977/**
2978 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
2979 * @dev: device to compare against
2980 * @new_class: class of the new device
2981 * @new_id: IDENTIFY page of the new device
2982 *
2983 * Compare @new_class and @new_id against @dev and determine
2984 * whether @dev is the device indicated by @new_class and
2985 * @new_id.
2986 *
2987 * LOCKING:
2988 * None.
2989 *
2990 * RETURNS:
2991 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
2992 */
3373efd8
TH
2993static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
2994 const u16 *new_id)
623a3128
TH
2995{
2996 const u16 *old_id = dev->id;
2997 unsigned char model[2][41], serial[2][21];
2998 u64 new_n_sectors;
2999
3000 if (dev->class != new_class) {
f15a1daf
TH
3001 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3002 dev->class, new_class);
623a3128
TH
3003 return 0;
3004 }
3005
3006 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
3007 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
3008 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
3009 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
3010 new_n_sectors = ata_id_n_sectors(new_id);
3011
3012 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3013 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3014 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3015 return 0;
3016 }
3017
3018 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3019 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3020 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3021 return 0;
3022 }
3023
3024 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3025 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3026 "%llu != %llu\n",
3027 (unsigned long long)dev->n_sectors,
3028 (unsigned long long)new_n_sectors);
623a3128
TH
3029 return 0;
3030 }
3031
3032 return 1;
3033}
3034
3035/**
3036 * ata_dev_revalidate - Revalidate ATA device
623a3128 3037 * @dev: device to revalidate
bff04647 3038 * @readid_flags: read ID flags
623a3128
TH
3039 *
3040 * Re-read IDENTIFY page and make sure @dev is still attached to
3041 * the port.
3042 *
3043 * LOCKING:
3044 * Kernel thread context (may sleep)
3045 *
3046 * RETURNS:
3047 * 0 on success, negative errno otherwise
3048 */
bff04647 3049int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3050{
5eb45c02 3051 unsigned int class = dev->class;
f15a1daf 3052 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3053 int rc;
3054
5eb45c02
TH
3055 if (!ata_dev_enabled(dev)) {
3056 rc = -ENODEV;
3057 goto fail;
3058 }
623a3128 3059
fe635c7e 3060 /* read ID data */
bff04647 3061 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3062 if (rc)
3063 goto fail;
3064
3065 /* is the device still there? */
3373efd8 3066 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3067 rc = -ENODEV;
3068 goto fail;
3069 }
3070
fe635c7e 3071 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3072
3073 /* configure device according to the new ID */
efdaedc4 3074 rc = ata_dev_configure(dev);
5eb45c02
TH
3075 if (rc == 0)
3076 return 0;
623a3128
TH
3077
3078 fail:
f15a1daf 3079 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3080 return rc;
3081}
3082
6919a0a6
AC
3083struct ata_blacklist_entry {
3084 const char *model_num;
3085 const char *model_rev;
3086 unsigned long horkage;
3087};
3088
3089static const struct ata_blacklist_entry ata_device_blacklist [] = {
3090 /* Devices with DMA related problems under Linux */
3091 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3092 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3093 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3094 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3095 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3096 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3097 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3098 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3099 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3100 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3101 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3102 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3103 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3104 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3105 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3106 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3107 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3108 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3109 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3110 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3111 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3112 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3113 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3114 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3115 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3116 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3117 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3118 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3119 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3120 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3121
3122 /* Devices we expect to fail diagnostics */
3123
3124 /* Devices where NCQ should be avoided */
3125 /* NCQ is slow */
3126 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3127
3128 /* Devices with NCQ limits */
3129
3130 /* End Marker */
3131 { }
1da177e4 3132};
2e9edbf8 3133
f4b15fef
AC
/* Strip trailing blanks from @s in place (ATAPI strings are blank-padded)
 * and return the resulting length.  @len bounds the scan; @s need not be
 * NUL-terminated on entry but is after trimming.
 */
static int ata_strim(char *s, size_t len)
{
	len = strnlen(s, len);

	while (len > 0 && s[len - 1] == ' ')
		s[--len] = '\0';

	return len;
}
1da177e4 3145
6919a0a6 3146unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3147{
f4b15fef
AC
3148 unsigned char model_num[40];
3149 unsigned char model_rev[16];
3150 unsigned int nlen, rlen;
6919a0a6 3151 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3152
f4b15fef
AC
3153 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3154 sizeof(model_num));
3155 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3156 sizeof(model_rev));
3157 nlen = ata_strim(model_num, sizeof(model_num));
3158 rlen = ata_strim(model_rev, sizeof(model_rev));
1da177e4 3159
6919a0a6
AC
3160 while (ad->model_num) {
3161 if (!strncmp(ad->model_num, model_num, nlen)) {
3162 if (ad->model_rev == NULL)
3163 return ad->horkage;
3164 if (!strncmp(ad->model_rev, model_rev, rlen))
3165 return ad->horkage;
f4b15fef 3166 }
6919a0a6 3167 ad++;
f4b15fef 3168 }
1da177e4
LT
3169 return 0;
3170}
3171
6919a0a6
AC
3172static int ata_dma_blacklisted(const struct ata_device *dev)
3173{
3174 /* We don't support polling DMA.
3175 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3176 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3177 */
3178 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3179 (dev->flags & ATA_DFLAG_CDB_INTR))
3180 return 1;
3181 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3182}
3183
a6d5a51c
TH
3184/**
3185 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3186 * @dev: Device to compute xfermask for
3187 *
acf356b1
TH
3188 * Compute supported xfermask of @dev and store it in
3189 * dev->*_mask. This function is responsible for applying all
3190 * known limits including host controller limits, device
3191 * blacklist, etc...
a6d5a51c
TH
3192 *
3193 * LOCKING:
3194 * None.
a6d5a51c 3195 */
3373efd8 3196static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3197{
3373efd8 3198 struct ata_port *ap = dev->ap;
cca3974e 3199 struct ata_host *host = ap->host;
a6d5a51c 3200 unsigned long xfer_mask;
1da177e4 3201
37deecb5 3202 /* controller modes available */
565083e1
TH
3203 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3204 ap->mwdma_mask, ap->udma_mask);
3205
3206 /* Apply cable rule here. Don't apply it early because when
3207 * we handle hot plug the cable type can itself change.
3208 */
3209 if (ap->cbl == ATA_CBL_PATA40)
3210 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
fc085150
AC
3211 /* Apply drive side cable rule. Unknown or 80 pin cables reported
3212 * host side are checked drive side as well. Cases where we know a
3213 * 40wire cable is used safely for 80 are not checked here.
3214 */
3215 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3216 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3217
1da177e4 3218
37deecb5
TH
3219 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3220 dev->mwdma_mask, dev->udma_mask);
3221 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3222
b352e57d
AC
3223 /*
3224 * CFA Advanced TrueIDE timings are not allowed on a shared
3225 * cable
3226 */
3227 if (ata_dev_pair(dev)) {
3228 /* No PIO5 or PIO6 */
3229 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3230 /* No MWDMA3 or MWDMA 4 */
3231 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3232 }
3233
37deecb5
TH
3234 if (ata_dma_blacklisted(dev)) {
3235 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3236 ata_dev_printk(dev, KERN_WARNING,
3237 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3238 }
a6d5a51c 3239
cca3974e 3240 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
37deecb5
TH
3241 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3242 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3243 "other device, disabling DMA\n");
5444a6f4 3244 }
565083e1 3245
5444a6f4
AC
3246 if (ap->ops->mode_filter)
3247 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3248
565083e1
TH
3249 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3250 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3251}
3252
1da177e4
LT
3253/**
3254 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3255 * @dev: Device to which command will be sent
3256 *
780a87f7
JG
3257 * Issue SET FEATURES - XFER MODE command to device @dev
3258 * on port @ap.
3259 *
1da177e4 3260 * LOCKING:
0cba632b 3261 * PCI/etc. bus probe sem.
83206a29
TH
3262 *
3263 * RETURNS:
3264 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3265 */
3266
3373efd8 3267static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3268{
a0123703 3269 struct ata_taskfile tf;
83206a29 3270 unsigned int err_mask;
1da177e4
LT
3271
3272 /* set up set-features taskfile */
3273 DPRINTK("set features - xfer mode\n");
3274
3373efd8 3275 ata_tf_init(dev, &tf);
a0123703
TH
3276 tf.command = ATA_CMD_SET_FEATURES;
3277 tf.feature = SETFEATURES_XFER;
3278 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3279 tf.protocol = ATA_PROT_NODATA;
3280 tf.nsect = dev->xfer_mode;
1da177e4 3281
3373efd8 3282 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3283
83206a29
TH
3284 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3285 return err_mask;
1da177e4
LT
3286}
3287
8bf62ece
AL
3288/**
3289 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3290 * @dev: Device to which command will be sent
e2a7f77a
RD
3291 * @heads: Number of heads (taskfile parameter)
3292 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3293 *
3294 * LOCKING:
6aff8f1f
TH
3295 * Kernel thread context (may sleep)
3296 *
3297 * RETURNS:
3298 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3299 */
3373efd8
TH
3300static unsigned int ata_dev_init_params(struct ata_device *dev,
3301 u16 heads, u16 sectors)
8bf62ece 3302{
a0123703 3303 struct ata_taskfile tf;
6aff8f1f 3304 unsigned int err_mask;
8bf62ece
AL
3305
3306 /* Number of sectors per track 1-255. Number of heads 1-16 */
3307 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3308 return AC_ERR_INVALID;
8bf62ece
AL
3309
3310 /* set up init dev params taskfile */
3311 DPRINTK("init dev params \n");
3312
3373efd8 3313 ata_tf_init(dev, &tf);
a0123703
TH
3314 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3315 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3316 tf.protocol = ATA_PROT_NODATA;
3317 tf.nsect = sectors;
3318 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3319
3373efd8 3320 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3321
6aff8f1f
TH
3322 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3323 return err_mask;
8bf62ece
AL
3324}
3325
1da177e4 3326/**
0cba632b
JG
3327 * ata_sg_clean - Unmap DMA memory associated with command
3328 * @qc: Command containing DMA memory to be released
3329 *
3330 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3331 *
3332 * LOCKING:
cca3974e 3333 * spin_lock_irqsave(host lock)
1da177e4
LT
3334 */
3335
3336static void ata_sg_clean(struct ata_queued_cmd *qc)
3337{
3338 struct ata_port *ap = qc->ap;
cedc9a47 3339 struct scatterlist *sg = qc->__sg;
1da177e4 3340 int dir = qc->dma_dir;
cedc9a47 3341 void *pad_buf = NULL;
1da177e4 3342
a4631474
TH
3343 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3344 WARN_ON(sg == NULL);
1da177e4
LT
3345
3346 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3347 WARN_ON(qc->n_elem > 1);
1da177e4 3348
2c13b7ce 3349 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3350
cedc9a47
JG
3351 /* if we padded the buffer out to 32-bit bound, and data
3352 * xfer direction is from-device, we must copy from the
3353 * pad buffer back into the supplied buffer
3354 */
3355 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3356 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3357
3358 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3359 if (qc->n_elem)
2f1f610b 3360 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3361 /* restore last sg */
3362 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3363 if (pad_buf) {
3364 struct scatterlist *psg = &qc->pad_sgent;
3365 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3366 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3367 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3368 }
3369 } else {
2e242fa9 3370 if (qc->n_elem)
2f1f610b 3371 dma_unmap_single(ap->dev,
e1410f2d
JG
3372 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3373 dir);
cedc9a47
JG
3374 /* restore sg */
3375 sg->length += qc->pad_len;
3376 if (pad_buf)
3377 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3378 pad_buf, qc->pad_len);
3379 }
1da177e4
LT
3380
3381 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3382 qc->__sg = NULL;
1da177e4
LT
3383}
3384
3385/**
3386 * ata_fill_sg - Fill PCI IDE PRD table
3387 * @qc: Metadata associated with taskfile to be transferred
3388 *
780a87f7
JG
3389 * Fill PCI IDE PRD (scatter-gather) table with segments
3390 * associated with the current disk command.
3391 *
1da177e4 3392 * LOCKING:
cca3974e 3393 * spin_lock_irqsave(host lock)
1da177e4
LT
3394 *
3395 */
3396static void ata_fill_sg(struct ata_queued_cmd *qc)
3397{
1da177e4 3398 struct ata_port *ap = qc->ap;
cedc9a47
JG
3399 struct scatterlist *sg;
3400 unsigned int idx;
1da177e4 3401
a4631474 3402 WARN_ON(qc->__sg == NULL);
f131883e 3403 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3404
3405 idx = 0;
cedc9a47 3406 ata_for_each_sg(sg, qc) {
1da177e4
LT
3407 u32 addr, offset;
3408 u32 sg_len, len;
3409
3410 /* determine if physical DMA addr spans 64K boundary.
3411 * Note h/w doesn't support 64-bit, so we unconditionally
3412 * truncate dma_addr_t to u32.
3413 */
3414 addr = (u32) sg_dma_address(sg);
3415 sg_len = sg_dma_len(sg);
3416
3417 while (sg_len) {
3418 offset = addr & 0xffff;
3419 len = sg_len;
3420 if ((offset + sg_len) > 0x10000)
3421 len = 0x10000 - offset;
3422
3423 ap->prd[idx].addr = cpu_to_le32(addr);
3424 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3425 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3426
3427 idx++;
3428 sg_len -= len;
3429 addr += len;
3430 }
3431 }
3432
3433 if (idx)
3434 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3435}
3436/**
3437 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3438 * @qc: Metadata associated with taskfile to check
3439 *
780a87f7
JG
3440 * Allow low-level driver to filter ATA PACKET commands, returning
3441 * a status indicating whether or not it is OK to use DMA for the
3442 * supplied PACKET command.
3443 *
1da177e4 3444 * LOCKING:
cca3974e 3445 * spin_lock_irqsave(host lock)
0cba632b 3446 *
1da177e4
LT
3447 * RETURNS: 0 when ATAPI DMA can be used
3448 * nonzero otherwise
3449 */
3450int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3451{
3452 struct ata_port *ap = qc->ap;
3453 int rc = 0; /* Assume ATAPI DMA is OK by default */
3454
3455 if (ap->ops->check_atapi_dma)
3456 rc = ap->ops->check_atapi_dma(qc);
3457
3458 return rc;
3459}
3460/**
3461 * ata_qc_prep - Prepare taskfile for submission
3462 * @qc: Metadata associated with taskfile to be prepared
3463 *
780a87f7
JG
3464 * Prepare ATA taskfile for submission.
3465 *
1da177e4 3466 * LOCKING:
cca3974e 3467 * spin_lock_irqsave(host lock)
1da177e4
LT
3468 */
3469void ata_qc_prep(struct ata_queued_cmd *qc)
3470{
3471 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3472 return;
3473
3474 ata_fill_sg(qc);
3475}
3476
e46834cd
BK
/* No-op qc_prep for controllers that need no PRD setup. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3478
0cba632b
JG
3479/**
3480 * ata_sg_init_one - Associate command with memory buffer
3481 * @qc: Command to be associated
3482 * @buf: Memory buffer
3483 * @buflen: Length of memory buffer, in bytes.
3484 *
3485 * Initialize the data-related elements of queued_cmd @qc
3486 * to point to a single memory buffer, @buf of byte length @buflen.
3487 *
3488 * LOCKING:
cca3974e 3489 * spin_lock_irqsave(host lock)
0cba632b
JG
3490 */
3491
1da177e4
LT
3492void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3493{
1da177e4
LT
3494 qc->flags |= ATA_QCFLAG_SINGLE;
3495
cedc9a47 3496 qc->__sg = &qc->sgent;
1da177e4 3497 qc->n_elem = 1;
cedc9a47 3498 qc->orig_n_elem = 1;
1da177e4 3499 qc->buf_virt = buf;
233277ca 3500 qc->nbytes = buflen;
1da177e4 3501
61c0596c 3502 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3503}
3504
0cba632b
JG
3505/**
3506 * ata_sg_init - Associate command with scatter-gather table.
3507 * @qc: Command to be associated
3508 * @sg: Scatter-gather table.
3509 * @n_elem: Number of elements in s/g table.
3510 *
3511 * Initialize the data-related elements of queued_cmd @qc
3512 * to point to a scatter-gather table @sg, containing @n_elem
3513 * elements.
3514 *
3515 * LOCKING:
cca3974e 3516 * spin_lock_irqsave(host lock)
0cba632b
JG
3517 */
3518
1da177e4
LT
3519void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3520 unsigned int n_elem)
3521{
3522 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3523 qc->__sg = sg;
1da177e4 3524 qc->n_elem = n_elem;
cedc9a47 3525 qc->orig_n_elem = n_elem;
1da177e4
LT
3526}
3527
3528/**
0cba632b
JG
3529 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3530 * @qc: Command with memory buffer to be mapped.
3531 *
3532 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
3533 *
3534 * LOCKING:
cca3974e 3535 * spin_lock_irqsave(host lock)
1da177e4
LT
3536 *
3537 * RETURNS:
0cba632b 3538 * Zero on success, negative on error.
1da177e4
LT
3539 */
3540
3541static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3542{
3543 struct ata_port *ap = qc->ap;
3544 int dir = qc->dma_dir;
cedc9a47 3545 struct scatterlist *sg = qc->__sg;
1da177e4 3546 dma_addr_t dma_address;
2e242fa9 3547 int trim_sg = 0;
1da177e4 3548
cedc9a47
JG
3549 /* we must lengthen transfers to end on a 32-bit boundary */
3550 qc->pad_len = sg->length & 3;
3551 if (qc->pad_len) {
3552 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3553 struct scatterlist *psg = &qc->pad_sgent;
3554
a4631474 3555 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3556
3557 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3558
3559 if (qc->tf.flags & ATA_TFLAG_WRITE)
3560 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3561 qc->pad_len);
3562
3563 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3564 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3565 /* trim sg */
3566 sg->length -= qc->pad_len;
2e242fa9
TH
3567 if (sg->length == 0)
3568 trim_sg = 1;
cedc9a47
JG
3569
3570 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3571 sg->length, qc->pad_len);
3572 }
3573
2e242fa9
TH
3574 if (trim_sg) {
3575 qc->n_elem--;
e1410f2d
JG
3576 goto skip_map;
3577 }
3578
2f1f610b 3579 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 3580 sg->length, dir);
537a95d9
TH
3581 if (dma_mapping_error(dma_address)) {
3582 /* restore sg */
3583 sg->length += qc->pad_len;
1da177e4 3584 return -1;
537a95d9 3585 }
1da177e4
LT
3586
3587 sg_dma_address(sg) = dma_address;
32529e01 3588 sg_dma_len(sg) = sg->length;
1da177e4 3589
2e242fa9 3590skip_map:
1da177e4
LT
3591 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3592 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3593
3594 return 0;
3595}
3596
3597/**
0cba632b
JG
3598 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3599 * @qc: Command with scatter-gather table to be mapped.
3600 *
3601 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3602 *
3603 * LOCKING:
cca3974e 3604 * spin_lock_irqsave(host lock)
1da177e4
LT
3605 *
3606 * RETURNS:
0cba632b 3607 * Zero on success, negative on error.
1da177e4
LT
3608 *
3609 */
3610
3611static int ata_sg_setup(struct ata_queued_cmd *qc)
3612{
3613 struct ata_port *ap = qc->ap;
cedc9a47
JG
3614 struct scatterlist *sg = qc->__sg;
3615 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 3616 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4
LT
3617
3618 VPRINTK("ENTER, ata%u\n", ap->id);
a4631474 3619 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 3620
cedc9a47
JG
3621 /* we must lengthen transfers to end on a 32-bit boundary */
3622 qc->pad_len = lsg->length & 3;
3623 if (qc->pad_len) {
3624 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3625 struct scatterlist *psg = &qc->pad_sgent;
3626 unsigned int offset;
3627
a4631474 3628 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3629
3630 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3631
3632 /*
3633 * psg->page/offset are used to copy to-be-written
3634 * data in this function or read data in ata_sg_clean.
3635 */
3636 offset = lsg->offset + lsg->length - qc->pad_len;
3637 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3638 psg->offset = offset_in_page(offset);
3639
3640 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3641 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3642 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 3643 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3644 }
3645
3646 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3647 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3648 /* trim last sg */
3649 lsg->length -= qc->pad_len;
e1410f2d
JG
3650 if (lsg->length == 0)
3651 trim_sg = 1;
cedc9a47
JG
3652
3653 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3654 qc->n_elem - 1, lsg->length, qc->pad_len);
3655 }
3656
e1410f2d
JG
3657 pre_n_elem = qc->n_elem;
3658 if (trim_sg && pre_n_elem)
3659 pre_n_elem--;
3660
3661 if (!pre_n_elem) {
3662 n_elem = 0;
3663 goto skip_map;
3664 }
3665
1da177e4 3666 dir = qc->dma_dir;
2f1f610b 3667 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
3668 if (n_elem < 1) {
3669 /* restore last sg */
3670 lsg->length += qc->pad_len;
1da177e4 3671 return -1;
537a95d9 3672 }
1da177e4
LT
3673
3674 DPRINTK("%d sg elements mapped\n", n_elem);
3675
e1410f2d 3676skip_map:
1da177e4
LT
3677 qc->n_elem = n_elem;
3678
3679 return 0;
3680}
3681
0baab86b 3682/**
c893a3ae 3683 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3684 * @buf: Buffer to swap
3685 * @buf_words: Number of 16-bit words in buffer.
3686 *
3687 * Swap halves of 16-bit words if needed to convert from
3688 * little-endian byte order to native cpu byte order, or
3689 * vice-versa.
3690 *
3691 * LOCKING:
6f0ef4fa 3692 * Inherited from caller.
0baab86b 3693 */
1da177e4
LT
3694void swap_buf_le16(u16 *buf, unsigned int buf_words)
3695{
3696#ifdef __BIG_ENDIAN
3697 unsigned int i;
3698
3699 for (i = 0; i < buf_words; i++)
3700 buf[i] = le16_to_cpu(buf[i]);
3701#endif /* __BIG_ENDIAN */
3702}
3703
6ae4cfb5
AL
3704/**
3705 * ata_mmio_data_xfer - Transfer data by MMIO
bf717b11 3706 * @adev: device for this I/O
6ae4cfb5
AL
3707 * @buf: data buffer
3708 * @buflen: buffer length
344babaa 3709 * @write_data: read/write
6ae4cfb5
AL
3710 *
3711 * Transfer data from/to the device data register by MMIO.
3712 *
3713 * LOCKING:
3714 * Inherited from caller.
6ae4cfb5
AL
3715 */
3716
88574551 3717void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
a6b2c5d4 3718 unsigned int buflen, int write_data)
1da177e4 3719{
a6b2c5d4 3720 struct ata_port *ap = adev->ap;
1da177e4
LT
3721 unsigned int i;
3722 unsigned int words = buflen >> 1;
3723 u16 *buf16 = (u16 *) buf;
3724 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3725
6ae4cfb5 3726 /* Transfer multiple of 2 bytes */
1da177e4
LT
3727 if (write_data) {
3728 for (i = 0; i < words; i++)
3729 writew(le16_to_cpu(buf16[i]), mmio);
3730 } else {
3731 for (i = 0; i < words; i++)
3732 buf16[i] = cpu_to_le16(readw(mmio));
3733 }
6ae4cfb5
AL
3734
3735 /* Transfer trailing 1 byte, if any. */
3736 if (unlikely(buflen & 0x01)) {
3737 u16 align_buf[1] = { 0 };
3738 unsigned char *trailing_buf = buf + buflen - 1;
3739
3740 if (write_data) {
3741 memcpy(align_buf, trailing_buf, 1);
3742 writew(le16_to_cpu(align_buf[0]), mmio);
3743 } else {
3744 align_buf[0] = cpu_to_le16(readw(mmio));
3745 memcpy(trailing_buf, align_buf, 1);
3746 }
3747 }
1da177e4
LT
3748}
3749
6ae4cfb5
AL
3750/**
3751 * ata_pio_data_xfer - Transfer data by PIO
a6b2c5d4 3752 * @adev: device to target
6ae4cfb5
AL
3753 * @buf: data buffer
3754 * @buflen: buffer length
344babaa 3755 * @write_data: read/write
6ae4cfb5
AL
3756 *
3757 * Transfer data from/to the device data register by PIO.
3758 *
3759 * LOCKING:
3760 * Inherited from caller.
6ae4cfb5
AL
3761 */
3762
88574551 3763void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
a6b2c5d4 3764 unsigned int buflen, int write_data)
1da177e4 3765{
a6b2c5d4 3766 struct ata_port *ap = adev->ap;
6ae4cfb5 3767 unsigned int words = buflen >> 1;
1da177e4 3768
6ae4cfb5 3769 /* Transfer multiple of 2 bytes */
1da177e4 3770 if (write_data)
6ae4cfb5 3771 outsw(ap->ioaddr.data_addr, buf, words);
1da177e4 3772 else
6ae4cfb5
AL
3773 insw(ap->ioaddr.data_addr, buf, words);
3774
3775 /* Transfer trailing 1 byte, if any. */
3776 if (unlikely(buflen & 0x01)) {
3777 u16 align_buf[1] = { 0 };
3778 unsigned char *trailing_buf = buf + buflen - 1;
3779
3780 if (write_data) {
3781 memcpy(align_buf, trailing_buf, 1);
3782 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3783 } else {
3784 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3785 memcpy(trailing_buf, align_buf, 1);
3786 }
3787 }
1da177e4
LT
3788}
3789
75e99585
AC
/**
 *	ata_pio_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			     unsigned int buflen, int write_data)
{
	unsigned long flags;

	/* Wrap the plain PIO transfer in a local IRQ-off section. */
	local_irq_save(flags);
	ata_pio_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
3812
3813
6ae4cfb5
AL
3814/**
3815 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3816 * @qc: Command on going
3817 *
3818 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3819 *
3820 * LOCKING:
3821 * Inherited from caller.
3822 */
3823
1da177e4
LT
3824static void ata_pio_sector(struct ata_queued_cmd *qc)
3825{
3826 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 3827 struct scatterlist *sg = qc->__sg;
1da177e4
LT
3828 struct ata_port *ap = qc->ap;
3829 struct page *page;
3830 unsigned int offset;
3831 unsigned char *buf;
3832
3833 if (qc->cursect == (qc->nsect - 1))
14be71f4 3834 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
3835
3836 page = sg[qc->cursg].page;
3837 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
3838
3839 /* get the current page and offset */
3840 page = nth_page(page, (offset >> PAGE_SHIFT));
3841 offset %= PAGE_SIZE;
3842
1da177e4
LT
3843 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3844
91b8b313
AL
3845 if (PageHighMem(page)) {
3846 unsigned long flags;
3847
a6b2c5d4 3848 /* FIXME: use a bounce buffer */
91b8b313
AL
3849 local_irq_save(flags);
3850 buf = kmap_atomic(page, KM_IRQ0);
083958d3 3851
91b8b313 3852 /* do the actual data transfer */
a6b2c5d4 3853 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
1da177e4 3854
91b8b313
AL
3855 kunmap_atomic(buf, KM_IRQ0);
3856 local_irq_restore(flags);
3857 } else {
3858 buf = page_address(page);
a6b2c5d4 3859 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
91b8b313 3860 }
1da177e4
LT
3861
3862 qc->cursect++;
3863 qc->cursg_ofs++;
3864
32529e01 3865 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
1da177e4
LT
3866 qc->cursg++;
3867 qc->cursg_ofs = 0;
3868 }
1da177e4 3869}
1da177e4 3870
07f6f7d0
AL
3871/**
3872 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3873 * @qc: Command on going
3874 *
c81e29b4 3875 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
3876 * ATA device for the DRQ request.
3877 *
3878 * LOCKING:
3879 * Inherited from caller.
3880 */
1da177e4 3881
07f6f7d0
AL
3882static void ata_pio_sectors(struct ata_queued_cmd *qc)
3883{
3884 if (is_multi_taskfile(&qc->tf)) {
3885 /* READ/WRITE MULTIPLE */
3886 unsigned int nsect;
3887
587005de 3888 WARN_ON(qc->dev->multi_count == 0);
1da177e4 3889
07f6f7d0
AL
3890 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
3891 while (nsect--)
3892 ata_pio_sector(qc);
3893 } else
3894 ata_pio_sector(qc);
3895}
3896
c71c1857
AL
3897/**
3898 * atapi_send_cdb - Write CDB bytes to hardware
3899 * @ap: Port to which ATAPI device is attached.
3900 * @qc: Taskfile currently active
3901 *
3902 * When device has indicated its readiness to accept
3903 * a CDB, this function is called. Send the CDB.
3904 *
3905 * LOCKING:
3906 * caller.
3907 */
3908
3909static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
3910{
3911 /* send SCSI cdb */
3912 DPRINTK("send cdb\n");
db024d53 3913 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 3914
a6b2c5d4 3915 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
3916 ata_altstatus(ap); /* flush */
3917
3918 switch (qc->tf.protocol) {
3919 case ATA_PROT_ATAPI:
3920 ap->hsm_task_state = HSM_ST;
3921 break;
3922 case ATA_PROT_ATAPI_NODATA:
3923 ap->hsm_task_state = HSM_ST_LAST;
3924 break;
3925 case ATA_PROT_ATAPI_DMA:
3926 ap->hsm_task_state = HSM_ST_LAST;
3927 /* initiate bmdma */
3928 ap->ops->bmdma_start(qc);
3929 break;
3930 }
1da177e4
LT
3931}
3932
6ae4cfb5
AL
3933/**
3934 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
3935 * @qc: Command on going
3936 * @bytes: number of bytes
3937 *
3938 * Transfer Transfer data from/to the ATAPI device.
3939 *
3940 * LOCKING:
3941 * Inherited from caller.
3942 *
3943 */
3944
1da177e4
LT
3945static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
3946{
3947 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 3948 struct scatterlist *sg = qc->__sg;
1da177e4
LT
3949 struct ata_port *ap = qc->ap;
3950 struct page *page;
3951 unsigned char *buf;
3952 unsigned int offset, count;
3953
563a6e1f 3954 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 3955 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
3956
3957next_sg:
563a6e1f 3958 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 3959 /*
563a6e1f
AL
3960 * The end of qc->sg is reached and the device expects
3961 * more data to transfer. In order not to overrun qc->sg
3962 * and fulfill length specified in the byte count register,
3963 * - for read case, discard trailing data from the device
3964 * - for write case, padding zero data to the device
3965 */
3966 u16 pad_buf[1] = { 0 };
3967 unsigned int words = bytes >> 1;
3968 unsigned int i;
3969
3970 if (words) /* warning if bytes > 1 */
f15a1daf
TH
3971 ata_dev_printk(qc->dev, KERN_WARNING,
3972 "%u bytes trailing data\n", bytes);
563a6e1f
AL
3973
3974 for (i = 0; i < words; i++)
a6b2c5d4 3975 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 3976
14be71f4 3977 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
3978 return;
3979 }
3980
cedc9a47 3981 sg = &qc->__sg[qc->cursg];
1da177e4 3982
1da177e4
LT
3983 page = sg->page;
3984 offset = sg->offset + qc->cursg_ofs;
3985
3986 /* get the current page and offset */
3987 page = nth_page(page, (offset >> PAGE_SHIFT));
3988 offset %= PAGE_SIZE;
3989
6952df03 3990 /* don't overrun current sg */
32529e01 3991 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
3992
3993 /* don't cross page boundaries */
3994 count = min(count, (unsigned int)PAGE_SIZE - offset);
3995
7282aa4b
AL
3996 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3997
91b8b313
AL
3998 if (PageHighMem(page)) {
3999 unsigned long flags;
4000
a6b2c5d4 4001 /* FIXME: use bounce buffer */
91b8b313
AL
4002 local_irq_save(flags);
4003 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4004
91b8b313 4005 /* do the actual data transfer */
a6b2c5d4 4006 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4007
91b8b313
AL
4008 kunmap_atomic(buf, KM_IRQ0);
4009 local_irq_restore(flags);
4010 } else {
4011 buf = page_address(page);
a6b2c5d4 4012 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4013 }
1da177e4
LT
4014
4015 bytes -= count;
4016 qc->curbytes += count;
4017 qc->cursg_ofs += count;
4018
32529e01 4019 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4020 qc->cursg++;
4021 qc->cursg_ofs = 0;
4022 }
4023
563a6e1f 4024 if (bytes)
1da177e4 4025 goto next_sg;
1da177e4
LT
4026}
4027
6ae4cfb5
AL
4028/**
4029 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4030 * @qc: Command on going
4031 *
4032 * Transfer Transfer data from/to the ATAPI device.
4033 *
4034 * LOCKING:
4035 * Inherited from caller.
6ae4cfb5
AL
4036 */
4037
1da177e4
LT
4038static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4039{
4040 struct ata_port *ap = qc->ap;
4041 struct ata_device *dev = qc->dev;
4042 unsigned int ireason, bc_lo, bc_hi, bytes;
4043 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4044
eec4c3f3
AL
4045 /* Abuse qc->result_tf for temp storage of intermediate TF
4046 * here to save some kernel stack usage.
4047 * For normal completion, qc->result_tf is not relevant. For
4048 * error, qc->result_tf is later overwritten by ata_qc_complete().
4049 * So, the correctness of qc->result_tf is not affected.
4050 */
4051 ap->ops->tf_read(ap, &qc->result_tf);
4052 ireason = qc->result_tf.nsect;
4053 bc_lo = qc->result_tf.lbam;
4054 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4055 bytes = (bc_hi << 8) | bc_lo;
4056
4057 /* shall be cleared to zero, indicating xfer of data */
4058 if (ireason & (1 << 0))
4059 goto err_out;
4060
4061 /* make sure transfer direction matches expected */
4062 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4063 if (do_write != i_write)
4064 goto err_out;
4065
312f7da2
AL
4066 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
4067
1da177e4
LT
4068 __atapi_pio_bytes(qc, bytes);
4069
4070 return;
4071
4072err_out:
f15a1daf 4073 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4074 qc->err_mask |= AC_ERR_HSM;
14be71f4 4075 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4076}
4077
4078/**
c234fb00
AL
4079 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4080 * @ap: the target ata_port
4081 * @qc: qc on going
1da177e4 4082 *
c234fb00
AL
4083 * RETURNS:
4084 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4085 */
c234fb00
AL
4086
4087static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4088{
c234fb00
AL
4089 if (qc->tf.flags & ATA_TFLAG_POLLING)
4090 return 1;
1da177e4 4091
c234fb00
AL
4092 if (ap->hsm_task_state == HSM_ST_FIRST) {
4093 if (qc->tf.protocol == ATA_PROT_PIO &&
4094 (qc->tf.flags & ATA_TFLAG_WRITE))
4095 return 1;
1da177e4 4096
c234fb00
AL
4097 if (is_atapi_taskfile(&qc->tf) &&
4098 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4099 return 1;
fe79e683
AL
4100 }
4101
c234fb00
AL
4102 return 0;
4103}
1da177e4 4104
c17ea20d
TH
4105/**
4106 * ata_hsm_qc_complete - finish a qc running on standard HSM
4107 * @qc: Command to complete
4108 * @in_wq: 1 if called from workqueue, 0 otherwise
4109 *
4110 * Finish @qc which is running on standard HSM.
4111 *
4112 * LOCKING:
cca3974e 4113 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4114 * Otherwise, none on entry and grabs host lock.
4115 */
4116static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4117{
4118 struct ata_port *ap = qc->ap;
4119 unsigned long flags;
4120
4121 if (ap->ops->error_handler) {
4122 if (in_wq) {
ba6a1308 4123 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4124
cca3974e
JG
4125 /* EH might have kicked in while host lock is
4126 * released.
c17ea20d
TH
4127 */
4128 qc = ata_qc_from_tag(ap, qc->tag);
4129 if (qc) {
4130 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4131 ata_irq_on(ap);
4132 ata_qc_complete(qc);
4133 } else
4134 ata_port_freeze(ap);
4135 }
4136
ba6a1308 4137 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4138 } else {
4139 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4140 ata_qc_complete(qc);
4141 else
4142 ata_port_freeze(ap);
4143 }
4144 } else {
4145 if (in_wq) {
ba6a1308 4146 spin_lock_irqsave(ap->lock, flags);
c17ea20d
TH
4147 ata_irq_on(ap);
4148 ata_qc_complete(qc);
ba6a1308 4149 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4150 } else
4151 ata_qc_complete(qc);
4152 }
1da177e4 4153
c81e29b4 4154 ata_altstatus(ap); /* flush */
c17ea20d
TH
4155}
4156
bb5cb290
AL
4157/**
4158 * ata_hsm_move - move the HSM to the next state.
4159 * @ap: the target ata_port
4160 * @qc: qc on going
4161 * @status: current device status
4162 * @in_wq: 1 if called from workqueue, 0 otherwise
4163 *
4164 * RETURNS:
4165 * 1 when poll next status needed, 0 otherwise.
4166 */
9a1004d0
TH
4167int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4168 u8 status, int in_wq)
e2cec771 4169{
bb5cb290
AL
4170 unsigned long flags = 0;
4171 int poll_next;
4172
6912ccd5
AL
4173 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4174
bb5cb290
AL
4175 /* Make sure ata_qc_issue_prot() does not throw things
4176 * like DMA polling into the workqueue. Notice that
4177 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4178 */
c234fb00 4179 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4180
e2cec771 4181fsm_start:
999bb6f4
AL
4182 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4183 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4184
e2cec771
AL
4185 switch (ap->hsm_task_state) {
4186 case HSM_ST_FIRST:
bb5cb290
AL
4187 /* Send first data block or PACKET CDB */
4188
4189 /* If polling, we will stay in the work queue after
4190 * sending the data. Otherwise, interrupt handler
4191 * takes over after sending the data.
4192 */
4193 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4194
e2cec771 4195 /* check device status */
3655d1d3
AL
4196 if (unlikely((status & ATA_DRQ) == 0)) {
4197 /* handle BSY=0, DRQ=0 as error */
4198 if (likely(status & (ATA_ERR | ATA_DF)))
4199 /* device stops HSM for abort/error */
4200 qc->err_mask |= AC_ERR_DEV;
4201 else
4202 /* HSM violation. Let EH handle this */
4203 qc->err_mask |= AC_ERR_HSM;
4204
14be71f4 4205 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4206 goto fsm_start;
1da177e4
LT
4207 }
4208
71601958
AL
4209 /* Device should not ask for data transfer (DRQ=1)
4210 * when it finds something wrong.
eee6c32f
AL
4211 * We ignore DRQ here and stop the HSM by
4212 * changing hsm_task_state to HSM_ST_ERR and
4213 * let the EH abort the command or reset the device.
71601958
AL
4214 */
4215 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4216 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4217 ap->id, status);
3655d1d3 4218 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4219 ap->hsm_task_state = HSM_ST_ERR;
4220 goto fsm_start;
71601958 4221 }
1da177e4 4222
bb5cb290
AL
4223 /* Send the CDB (atapi) or the first data block (ata pio out).
4224 * During the state transition, interrupt handler shouldn't
4225 * be invoked before the data transfer is complete and
4226 * hsm_task_state is changed. Hence, the following locking.
4227 */
4228 if (in_wq)
ba6a1308 4229 spin_lock_irqsave(ap->lock, flags);
1da177e4 4230
bb5cb290
AL
4231 if (qc->tf.protocol == ATA_PROT_PIO) {
4232 /* PIO data out protocol.
4233 * send first data block.
4234 */
0565c26d 4235
bb5cb290
AL
4236 /* ata_pio_sectors() might change the state
4237 * to HSM_ST_LAST. so, the state is changed here
4238 * before ata_pio_sectors().
4239 */
4240 ap->hsm_task_state = HSM_ST;
4241 ata_pio_sectors(qc);
4242 ata_altstatus(ap); /* flush */
4243 } else
4244 /* send CDB */
4245 atapi_send_cdb(ap, qc);
4246
4247 if (in_wq)
ba6a1308 4248 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4249
4250 /* if polling, ata_pio_task() handles the rest.
4251 * otherwise, interrupt handler takes over from here.
4252 */
e2cec771 4253 break;
1c848984 4254
e2cec771
AL
4255 case HSM_ST:
4256 /* complete command or read/write the data register */
4257 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4258 /* ATAPI PIO protocol */
4259 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4260 /* No more data to transfer or device error.
4261 * Device error will be tagged in HSM_ST_LAST.
4262 */
e2cec771
AL
4263 ap->hsm_task_state = HSM_ST_LAST;
4264 goto fsm_start;
4265 }
1da177e4 4266
71601958
AL
4267 /* Device should not ask for data transfer (DRQ=1)
4268 * when it finds something wrong.
eee6c32f
AL
4269 * We ignore DRQ here and stop the HSM by
4270 * changing hsm_task_state to HSM_ST_ERR and
4271 * let the EH abort the command or reset the device.
71601958
AL
4272 */
4273 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4274 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4275 ap->id, status);
3655d1d3 4276 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4277 ap->hsm_task_state = HSM_ST_ERR;
4278 goto fsm_start;
71601958 4279 }
1da177e4 4280
e2cec771 4281 atapi_pio_bytes(qc);
7fb6ec28 4282
e2cec771
AL
4283 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4284 /* bad ireason reported by device */
4285 goto fsm_start;
1da177e4 4286
e2cec771
AL
4287 } else {
4288 /* ATA PIO protocol */
4289 if (unlikely((status & ATA_DRQ) == 0)) {
4290 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4291 if (likely(status & (ATA_ERR | ATA_DF)))
4292 /* device stops HSM for abort/error */
4293 qc->err_mask |= AC_ERR_DEV;
4294 else
55a8e2c8
TH
4295 /* HSM violation. Let EH handle this.
4296 * Phantom devices also trigger this
4297 * condition. Mark hint.
4298 */
4299 qc->err_mask |= AC_ERR_HSM |
4300 AC_ERR_NODEV_HINT;
3655d1d3 4301
e2cec771
AL
4302 ap->hsm_task_state = HSM_ST_ERR;
4303 goto fsm_start;
4304 }
1da177e4 4305
eee6c32f
AL
4306 /* For PIO reads, some devices may ask for
4307 * data transfer (DRQ=1) alone with ERR=1.
4308 * We respect DRQ here and transfer one
4309 * block of junk data before changing the
4310 * hsm_task_state to HSM_ST_ERR.
4311 *
4312 * For PIO writes, ERR=1 DRQ=1 doesn't make
4313 * sense since the data block has been
4314 * transferred to the device.
71601958
AL
4315 */
4316 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4317 /* data might be corrputed */
4318 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4319
4320 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4321 ata_pio_sectors(qc);
4322 ata_altstatus(ap);
4323 status = ata_wait_idle(ap);
4324 }
4325
3655d1d3
AL
4326 if (status & (ATA_BUSY | ATA_DRQ))
4327 qc->err_mask |= AC_ERR_HSM;
4328
eee6c32f
AL
4329 /* ata_pio_sectors() might change the
4330 * state to HSM_ST_LAST. so, the state
4331 * is changed after ata_pio_sectors().
4332 */
4333 ap->hsm_task_state = HSM_ST_ERR;
4334 goto fsm_start;
71601958
AL
4335 }
4336
e2cec771
AL
4337 ata_pio_sectors(qc);
4338
4339 if (ap->hsm_task_state == HSM_ST_LAST &&
4340 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4341 /* all data read */
4342 ata_altstatus(ap);
52a32205 4343 status = ata_wait_idle(ap);
e2cec771
AL
4344 goto fsm_start;
4345 }
4346 }
4347
4348 ata_altstatus(ap); /* flush */
bb5cb290 4349 poll_next = 1;
1da177e4
LT
4350 break;
4351
14be71f4 4352 case HSM_ST_LAST:
6912ccd5
AL
4353 if (unlikely(!ata_ok(status))) {
4354 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4355 ap->hsm_task_state = HSM_ST_ERR;
4356 goto fsm_start;
4357 }
4358
4359 /* no more data to transfer */
4332a771
AL
4360 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4361 ap->id, qc->dev->devno, status);
e2cec771 4362
6912ccd5
AL
4363 WARN_ON(qc->err_mask);
4364
e2cec771 4365 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4366
e2cec771 4367 /* complete taskfile transaction */
c17ea20d 4368 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4369
4370 poll_next = 0;
1da177e4
LT
4371 break;
4372
14be71f4 4373 case HSM_ST_ERR:
e2cec771
AL
4374 /* make sure qc->err_mask is available to
4375 * know what's wrong and recover
4376 */
4377 WARN_ON(qc->err_mask == 0);
4378
4379 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4380
999bb6f4 4381 /* complete taskfile transaction */
c17ea20d 4382 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4383
4384 poll_next = 0;
e2cec771
AL
4385 break;
4386 default:
bb5cb290 4387 poll_next = 0;
6912ccd5 4388 BUG();
1da177e4
LT
4389 }
4390
bb5cb290 4391 return poll_next;
1da177e4
LT
4392}
4393
1da177e4 4394static void ata_pio_task(void *_data)
8061f5f0 4395{
c91af2c8
TH
4396 struct ata_queued_cmd *qc = _data;
4397 struct ata_port *ap = qc->ap;
8061f5f0 4398 u8 status;
a1af3734 4399 int poll_next;
8061f5f0 4400
7fb6ec28 4401fsm_start:
a1af3734 4402 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4403
a1af3734
AL
4404 /*
4405 * This is purely heuristic. This is a fast path.
4406 * Sometimes when we enter, BSY will be cleared in
4407 * a chk-status or two. If not, the drive is probably seeking
4408 * or something. Snooze for a couple msecs, then
4409 * chk-status again. If still busy, queue delayed work.
4410 */
4411 status = ata_busy_wait(ap, ATA_BUSY, 5);
4412 if (status & ATA_BUSY) {
4413 msleep(2);
4414 status = ata_busy_wait(ap, ATA_BUSY, 10);
4415 if (status & ATA_BUSY) {
31ce6dae 4416 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4417 return;
4418 }
8061f5f0
TH
4419 }
4420
a1af3734
AL
4421 /* move the HSM */
4422 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4423
a1af3734
AL
4424 /* another command or interrupt handler
4425 * may be running at this point.
4426 */
4427 if (poll_next)
7fb6ec28 4428 goto fsm_start;
8061f5f0
TH
4429}
4430
1da177e4
LT
4431/**
4432 * ata_qc_new - Request an available ATA command, for queueing
4433 * @ap: Port associated with device @dev
4434 * @dev: Device from whom we request an available command structure
4435 *
4436 * LOCKING:
0cba632b 4437 * None.
1da177e4
LT
4438 */
4439
4440static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4441{
4442 struct ata_queued_cmd *qc = NULL;
4443 unsigned int i;
4444
e3180499 4445 /* no command while frozen */
b51e9e5d 4446 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4447 return NULL;
4448
2ab7db1f
TH
4449 /* the last tag is reserved for internal command. */
4450 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4451 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4452 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4453 break;
4454 }
4455
4456 if (qc)
4457 qc->tag = i;
4458
4459 return qc;
4460}
4461
4462/**
4463 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4464 * @dev: Device from whom we request an available command structure
4465 *
4466 * LOCKING:
0cba632b 4467 * None.
1da177e4
LT
4468 */
4469
3373efd8 4470struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4471{
3373efd8 4472 struct ata_port *ap = dev->ap;
1da177e4
LT
4473 struct ata_queued_cmd *qc;
4474
4475 qc = ata_qc_new(ap);
4476 if (qc) {
1da177e4
LT
4477 qc->scsicmd = NULL;
4478 qc->ap = ap;
4479 qc->dev = dev;
1da177e4 4480
2c13b7ce 4481 ata_qc_reinit(qc);
1da177e4
LT
4482 }
4483
4484 return qc;
4485}
4486
1da177e4
LT
4487/**
4488 * ata_qc_free - free unused ata_queued_cmd
4489 * @qc: Command to complete
4490 *
4491 * Designed to free unused ata_queued_cmd object
4492 * in case something prevents using it.
4493 *
4494 * LOCKING:
cca3974e 4495 * spin_lock_irqsave(host lock)
1da177e4
LT
4496 */
4497void ata_qc_free(struct ata_queued_cmd *qc)
4498{
4ba946e9
TH
4499 struct ata_port *ap = qc->ap;
4500 unsigned int tag;
4501
a4631474 4502 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4503
4ba946e9
TH
4504 qc->flags = 0;
4505 tag = qc->tag;
4506 if (likely(ata_tag_valid(tag))) {
4ba946e9 4507 qc->tag = ATA_TAG_POISON;
6cec4a39 4508 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4509 }
1da177e4
LT
4510}
4511
76014427 4512void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4513{
dedaf2b0
TH
4514 struct ata_port *ap = qc->ap;
4515
a4631474
TH
4516 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4517 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4518
4519 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4520 ata_sg_clean(qc);
4521
7401abf2 4522 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4523 if (qc->tf.protocol == ATA_PROT_NCQ)
4524 ap->sactive &= ~(1 << qc->tag);
4525 else
4526 ap->active_tag = ATA_TAG_POISON;
7401abf2 4527
3f3791d3
AL
4528 /* atapi: mark qc as inactive to prevent the interrupt handler
4529 * from completing the command twice later, before the error handler
4530 * is called. (when rc != 0 and atapi request sense is needed)
4531 */
4532 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4533 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4534
1da177e4 4535 /* call completion callback */
77853bf2 4536 qc->complete_fn(qc);
1da177e4
LT
4537}
4538
39599a53
TH
4539static void fill_result_tf(struct ata_queued_cmd *qc)
4540{
4541 struct ata_port *ap = qc->ap;
4542
4543 ap->ops->tf_read(ap, &qc->result_tf);
4544 qc->result_tf.flags = qc->tf.flags;
4545}
4546
f686bcb8
TH
4547/**
4548 * ata_qc_complete - Complete an active ATA command
4549 * @qc: Command to complete
4550 * @err_mask: ATA Status register contents
4551 *
4552 * Indicate to the mid and upper layers that an ATA
4553 * command has completed, with either an ok or not-ok status.
4554 *
4555 * LOCKING:
cca3974e 4556 * spin_lock_irqsave(host lock)
f686bcb8
TH
4557 */
4558void ata_qc_complete(struct ata_queued_cmd *qc)
4559{
4560 struct ata_port *ap = qc->ap;
4561
4562 /* XXX: New EH and old EH use different mechanisms to
4563 * synchronize EH with regular execution path.
4564 *
4565 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4566 * Normal execution path is responsible for not accessing a
4567 * failed qc. libata core enforces the rule by returning NULL
4568 * from ata_qc_from_tag() for failed qcs.
4569 *
4570 * Old EH depends on ata_qc_complete() nullifying completion
4571 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4572 * not synchronize with interrupt handler. Only PIO task is
4573 * taken care of.
4574 */
4575 if (ap->ops->error_handler) {
b51e9e5d 4576 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4577
4578 if (unlikely(qc->err_mask))
4579 qc->flags |= ATA_QCFLAG_FAILED;
4580
4581 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4582 if (!ata_tag_internal(qc->tag)) {
4583 /* always fill result TF for failed qc */
39599a53 4584 fill_result_tf(qc);
f686bcb8
TH
4585 ata_qc_schedule_eh(qc);
4586 return;
4587 }
4588 }
4589
4590 /* read result TF if requested */
4591 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4592 fill_result_tf(qc);
f686bcb8
TH
4593
4594 __ata_qc_complete(qc);
4595 } else {
4596 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4597 return;
4598
4599 /* read result TF if failed or requested */
4600 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4601 fill_result_tf(qc);
f686bcb8
TH
4602
4603 __ata_qc_complete(qc);
4604 }
4605}
4606
dedaf2b0
TH
4607/**
4608 * ata_qc_complete_multiple - Complete multiple qcs successfully
4609 * @ap: port in question
4610 * @qc_active: new qc_active mask
4611 * @finish_qc: LLDD callback invoked before completing a qc
4612 *
4613 * Complete in-flight commands. This functions is meant to be
4614 * called from low-level driver's interrupt routine to complete
4615 * requests normally. ap->qc_active and @qc_active is compared
4616 * and commands are completed accordingly.
4617 *
4618 * LOCKING:
cca3974e 4619 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4620 *
4621 * RETURNS:
4622 * Number of completed commands on success, -errno otherwise.
4623 */
4624int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4625 void (*finish_qc)(struct ata_queued_cmd *))
4626{
4627 int nr_done = 0;
4628 u32 done_mask;
4629 int i;
4630
4631 done_mask = ap->qc_active ^ qc_active;
4632
4633 if (unlikely(done_mask & qc_active)) {
4634 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4635 "(%08x->%08x)\n", ap->qc_active, qc_active);
4636 return -EINVAL;
4637 }
4638
4639 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4640 struct ata_queued_cmd *qc;
4641
4642 if (!(done_mask & (1 << i)))
4643 continue;
4644
4645 if ((qc = ata_qc_from_tag(ap, i))) {
4646 if (finish_qc)
4647 finish_qc(qc);
4648 ata_qc_complete(qc);
4649 nr_done++;
4650 }
4651 }
4652
4653 return nr_done;
4654}
4655
1da177e4
LT
4656static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4657{
4658 struct ata_port *ap = qc->ap;
4659
4660 switch (qc->tf.protocol) {
3dc1d881 4661 case ATA_PROT_NCQ:
1da177e4
LT
4662 case ATA_PROT_DMA:
4663 case ATA_PROT_ATAPI_DMA:
4664 return 1;
4665
4666 case ATA_PROT_ATAPI:
4667 case ATA_PROT_PIO:
1da177e4
LT
4668 if (ap->flags & ATA_FLAG_PIO_DMA)
4669 return 1;
4670
4671 /* fall through */
4672
4673 default:
4674 return 0;
4675 }
4676
4677 /* never reached */
4678}
4679
4680/**
4681 * ata_qc_issue - issue taskfile to device
4682 * @qc: command to issue to device
4683 *
4684 * Prepare an ATA command to submission to device.
4685 * This includes mapping the data into a DMA-able
4686 * area, filling in the S/G table, and finally
4687 * writing the taskfile to hardware, starting the command.
4688 *
4689 * LOCKING:
cca3974e 4690 * spin_lock_irqsave(host lock)
1da177e4 4691 */
8e0e694a 4692void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4693{
4694 struct ata_port *ap = qc->ap;
4695
dedaf2b0
TH
4696 /* Make sure only one non-NCQ command is outstanding. The
4697 * check is skipped for old EH because it reuses active qc to
4698 * request ATAPI sense.
4699 */
4700 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4701
4702 if (qc->tf.protocol == ATA_PROT_NCQ) {
4703 WARN_ON(ap->sactive & (1 << qc->tag));
4704 ap->sactive |= 1 << qc->tag;
4705 } else {
4706 WARN_ON(ap->sactive);
4707 ap->active_tag = qc->tag;
4708 }
4709
e4a70e76 4710 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4711 ap->qc_active |= 1 << qc->tag;
e4a70e76 4712
1da177e4
LT
4713 if (ata_should_dma_map(qc)) {
4714 if (qc->flags & ATA_QCFLAG_SG) {
4715 if (ata_sg_setup(qc))
8e436af9 4716 goto sg_err;
1da177e4
LT
4717 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4718 if (ata_sg_setup_one(qc))
8e436af9 4719 goto sg_err;
1da177e4
LT
4720 }
4721 } else {
4722 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4723 }
4724
4725 ap->ops->qc_prep(qc);
4726
8e0e694a
TH
4727 qc->err_mask |= ap->ops->qc_issue(qc);
4728 if (unlikely(qc->err_mask))
4729 goto err;
4730 return;
1da177e4 4731
8e436af9
TH
4732sg_err:
4733 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4734 qc->err_mask |= AC_ERR_SYSTEM;
4735err:
4736 ata_qc_complete(qc);
1da177e4
LT
4737}
4738
4739/**
4740 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4741 * @qc: command to issue to device
4742 *
4743 * Using various libata functions and hooks, this function
4744 * starts an ATA command. ATA commands are grouped into
4745 * classes called "protocols", and issuing each type of protocol
4746 * is slightly different.
4747 *
0baab86b
EF
4748 * May be used as the qc_issue() entry in ata_port_operations.
4749 *
1da177e4 4750 * LOCKING:
cca3974e 4751 * spin_lock_irqsave(host lock)
1da177e4
LT
4752 *
4753 * RETURNS:
9a3d9eb0 4754 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4755 */
4756
9a3d9eb0 4757unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4758{
4759 struct ata_port *ap = qc->ap;
4760
e50362ec
AL
4761 /* Use polling pio if the LLD doesn't handle
4762 * interrupt driven pio and atapi CDB interrupt.
4763 */
4764 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4765 switch (qc->tf.protocol) {
4766 case ATA_PROT_PIO:
4767 case ATA_PROT_ATAPI:
4768 case ATA_PROT_ATAPI_NODATA:
4769 qc->tf.flags |= ATA_TFLAG_POLLING;
4770 break;
4771 case ATA_PROT_ATAPI_DMA:
4772 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4773 /* see ata_dma_blacklisted() */
e50362ec
AL
4774 BUG();
4775 break;
4776 default:
4777 break;
4778 }
4779 }
4780
312f7da2 4781 /* select the device */
1da177e4
LT
4782 ata_dev_select(ap, qc->dev->devno, 1, 0);
4783
312f7da2 4784 /* start the command */
1da177e4
LT
4785 switch (qc->tf.protocol) {
4786 case ATA_PROT_NODATA:
312f7da2
AL
4787 if (qc->tf.flags & ATA_TFLAG_POLLING)
4788 ata_qc_set_polling(qc);
4789
e5338254 4790 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
4791 ap->hsm_task_state = HSM_ST_LAST;
4792
4793 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4794 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 4795
1da177e4
LT
4796 break;
4797
4798 case ATA_PROT_DMA:
587005de 4799 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4800
1da177e4
LT
4801 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4802 ap->ops->bmdma_setup(qc); /* set up bmdma */
4803 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 4804 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4805 break;
4806
312f7da2
AL
4807 case ATA_PROT_PIO:
4808 if (qc->tf.flags & ATA_TFLAG_POLLING)
4809 ata_qc_set_polling(qc);
1da177e4 4810
e5338254 4811 ata_tf_to_host(ap, &qc->tf);
312f7da2 4812
54f00389
AL
4813 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4814 /* PIO data out protocol */
4815 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 4816 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4817
4818 /* always send first data block using
e27486db 4819 * the ata_pio_task() codepath.
54f00389 4820 */
312f7da2 4821 } else {
54f00389
AL
4822 /* PIO data in protocol */
4823 ap->hsm_task_state = HSM_ST;
4824
4825 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4826 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4827
4828 /* if polling, ata_pio_task() handles the rest.
4829 * otherwise, interrupt handler takes over from here.
4830 */
312f7da2
AL
4831 }
4832
1da177e4
LT
4833 break;
4834
1da177e4 4835 case ATA_PROT_ATAPI:
1da177e4 4836 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
4837 if (qc->tf.flags & ATA_TFLAG_POLLING)
4838 ata_qc_set_polling(qc);
4839
e5338254 4840 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 4841
312f7da2
AL
4842 ap->hsm_task_state = HSM_ST_FIRST;
4843
4844 /* send cdb by polling if no cdb interrupt */
4845 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4846 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 4847 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4848 break;
4849
4850 case ATA_PROT_ATAPI_DMA:
587005de 4851 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4852
1da177e4
LT
4853 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4854 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
4855 ap->hsm_task_state = HSM_ST_FIRST;
4856
4857 /* send cdb by polling if no cdb interrupt */
4858 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 4859 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4860 break;
4861
4862 default:
4863 WARN_ON(1);
9a3d9eb0 4864 return AC_ERR_SYSTEM;
1da177e4
LT
4865 }
4866
4867 return 0;
4868}
4869
1da177e4
LT
4870/**
4871 * ata_host_intr - Handle host interrupt for given (port, task)
4872 * @ap: Port on which interrupt arrived (possibly...)
4873 * @qc: Taskfile currently active in engine
4874 *
4875 * Handle host interrupt for given queued command. Currently,
4876 * only DMA interrupts are handled. All other commands are
4877 * handled via polling with interrupts disabled (nIEN bit).
4878 *
4879 * LOCKING:
cca3974e 4880 * spin_lock_irqsave(host lock)
1da177e4
LT
4881 *
4882 * RETURNS:
4883 * One if interrupt was handled, zero if not (shared irq).
4884 */
4885
4886inline unsigned int ata_host_intr (struct ata_port *ap,
4887 struct ata_queued_cmd *qc)
4888{
312f7da2 4889 u8 status, host_stat = 0;
1da177e4 4890
312f7da2
AL
4891 VPRINTK("ata%u: protocol %d task_state %d\n",
4892 ap->id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 4893
312f7da2
AL
4894 /* Check whether we are expecting interrupt in this state */
4895 switch (ap->hsm_task_state) {
4896 case HSM_ST_FIRST:
6912ccd5
AL
4897 /* Some pre-ATAPI-4 devices assert INTRQ
4898 * at this state when ready to receive CDB.
4899 */
1da177e4 4900
312f7da2
AL
4901 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
4902 * The flag was turned on only for atapi devices.
4903 * No need to check is_atapi_taskfile(&qc->tf) again.
4904 */
4905 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 4906 goto idle_irq;
1da177e4 4907 break;
312f7da2
AL
4908 case HSM_ST_LAST:
4909 if (qc->tf.protocol == ATA_PROT_DMA ||
4910 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
4911 /* check status of DMA engine */
4912 host_stat = ap->ops->bmdma_status(ap);
4913 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
4914
4915 /* if it's not our irq... */
4916 if (!(host_stat & ATA_DMA_INTR))
4917 goto idle_irq;
4918
4919 /* before we do anything else, clear DMA-Start bit */
4920 ap->ops->bmdma_stop(qc);
a4f16610
AL
4921
4922 if (unlikely(host_stat & ATA_DMA_ERR)) {
4923 /* error when transfering data to/from memory */
4924 qc->err_mask |= AC_ERR_HOST_BUS;
4925 ap->hsm_task_state = HSM_ST_ERR;
4926 }
312f7da2
AL
4927 }
4928 break;
4929 case HSM_ST:
4930 break;
1da177e4
LT
4931 default:
4932 goto idle_irq;
4933 }
4934
312f7da2
AL
4935 /* check altstatus */
4936 status = ata_altstatus(ap);
4937 if (status & ATA_BUSY)
4938 goto idle_irq;
1da177e4 4939
312f7da2
AL
4940 /* check main status, clearing INTRQ */
4941 status = ata_chk_status(ap);
4942 if (unlikely(status & ATA_BUSY))
4943 goto idle_irq;
1da177e4 4944
312f7da2
AL
4945 /* ack bmdma irq events */
4946 ap->ops->irq_clear(ap);
1da177e4 4947
bb5cb290 4948 ata_hsm_move(ap, qc, status, 0);
1da177e4
LT
4949 return 1; /* irq handled */
4950
4951idle_irq:
4952 ap->stats.idle_irq++;
4953
4954#ifdef ATA_IRQ_TRAP
4955 if ((ap->stats.idle_irq % 1000) == 0) {
1da177e4 4956 ata_irq_ack(ap, 0); /* debug trap */
f15a1daf 4957 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 4958 return 1;
1da177e4
LT
4959 }
4960#endif
4961 return 0; /* irq not handled */
4962}
4963
4964/**
4965 * ata_interrupt - Default ATA host interrupt handler
0cba632b 4966 * @irq: irq line (unused)
cca3974e 4967 * @dev_instance: pointer to our ata_host information structure
1da177e4 4968 *
0cba632b
JG
4969 * Default interrupt handler for PCI IDE devices. Calls
4970 * ata_host_intr() for each port that is not disabled.
4971 *
1da177e4 4972 * LOCKING:
cca3974e 4973 * Obtains host lock during operation.
1da177e4
LT
4974 *
4975 * RETURNS:
0cba632b 4976 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
4977 */
4978
7d12e780 4979irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 4980{
cca3974e 4981 struct ata_host *host = dev_instance;
1da177e4
LT
4982 unsigned int i;
4983 unsigned int handled = 0;
4984 unsigned long flags;
4985
4986 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 4987 spin_lock_irqsave(&host->lock, flags);
1da177e4 4988
cca3974e 4989 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
4990 struct ata_port *ap;
4991
cca3974e 4992 ap = host->ports[i];
c1389503 4993 if (ap &&
029f5468 4994 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
4995 struct ata_queued_cmd *qc;
4996
4997 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 4998 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 4999 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5000 handled |= ata_host_intr(ap, qc);
5001 }
5002 }
5003
cca3974e 5004 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5005
5006 return IRQ_RETVAL(handled);
5007}
5008
34bf2170
TH
5009/**
5010 * sata_scr_valid - test whether SCRs are accessible
5011 * @ap: ATA port to test SCR accessibility for
5012 *
5013 * Test whether SCRs are accessible for @ap.
5014 *
5015 * LOCKING:
5016 * None.
5017 *
5018 * RETURNS:
5019 * 1 if SCRs are accessible, 0 otherwise.
5020 */
5021int sata_scr_valid(struct ata_port *ap)
5022{
5023 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5024}
5025
5026/**
5027 * sata_scr_read - read SCR register of the specified port
5028 * @ap: ATA port to read SCR for
5029 * @reg: SCR to read
5030 * @val: Place to store read value
5031 *
5032 * Read SCR register @reg of @ap into *@val. This function is
5033 * guaranteed to succeed if the cable type of the port is SATA
5034 * and the port implements ->scr_read.
5035 *
5036 * LOCKING:
5037 * None.
5038 *
5039 * RETURNS:
5040 * 0 on success, negative errno on failure.
5041 */
5042int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5043{
5044 if (sata_scr_valid(ap)) {
5045 *val = ap->ops->scr_read(ap, reg);
5046 return 0;
5047 }
5048 return -EOPNOTSUPP;
5049}
5050
5051/**
5052 * sata_scr_write - write SCR register of the specified port
5053 * @ap: ATA port to write SCR for
5054 * @reg: SCR to write
5055 * @val: value to write
5056 *
5057 * Write @val to SCR register @reg of @ap. This function is
5058 * guaranteed to succeed if the cable type of the port is SATA
5059 * and the port implements ->scr_read.
5060 *
5061 * LOCKING:
5062 * None.
5063 *
5064 * RETURNS:
5065 * 0 on success, negative errno on failure.
5066 */
5067int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5068{
5069 if (sata_scr_valid(ap)) {
5070 ap->ops->scr_write(ap, reg, val);
5071 return 0;
5072 }
5073 return -EOPNOTSUPP;
5074}
5075
5076/**
5077 * sata_scr_write_flush - write SCR register of the specified port and flush
5078 * @ap: ATA port to write SCR for
5079 * @reg: SCR to write
5080 * @val: value to write
5081 *
5082 * This function is identical to sata_scr_write() except that this
5083 * function performs flush after writing to the register.
5084 *
5085 * LOCKING:
5086 * None.
5087 *
5088 * RETURNS:
5089 * 0 on success, negative errno on failure.
5090 */
5091int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5092{
5093 if (sata_scr_valid(ap)) {
5094 ap->ops->scr_write(ap, reg, val);
5095 ap->ops->scr_read(ap, reg);
5096 return 0;
5097 }
5098 return -EOPNOTSUPP;
5099}
5100
5101/**
5102 * ata_port_online - test whether the given port is online
5103 * @ap: ATA port to test
5104 *
5105 * Test whether @ap is online. Note that this function returns 0
5106 * if online status of @ap cannot be obtained, so
5107 * ata_port_online(ap) != !ata_port_offline(ap).
5108 *
5109 * LOCKING:
5110 * None.
5111 *
5112 * RETURNS:
5113 * 1 if the port online status is available and online.
5114 */
5115int ata_port_online(struct ata_port *ap)
5116{
5117 u32 sstatus;
5118
5119 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5120 return 1;
5121 return 0;
5122}
5123
5124/**
5125 * ata_port_offline - test whether the given port is offline
5126 * @ap: ATA port to test
5127 *
5128 * Test whether @ap is offline. Note that this function returns
5129 * 0 if offline status of @ap cannot be obtained, so
5130 * ata_port_online(ap) != !ata_port_offline(ap).
5131 *
5132 * LOCKING:
5133 * None.
5134 *
5135 * RETURNS:
5136 * 1 if the port offline status is available and offline.
5137 */
5138int ata_port_offline(struct ata_port *ap)
5139{
5140 u32 sstatus;
5141
5142 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5143 return 1;
5144 return 0;
5145}
0baab86b 5146
77b08fb5 5147int ata_flush_cache(struct ata_device *dev)
9b847548 5148{
977e6b9f 5149 unsigned int err_mask;
9b847548
JA
5150 u8 cmd;
5151
5152 if (!ata_try_flush_cache(dev))
5153 return 0;
5154
6fc49adb 5155 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5156 cmd = ATA_CMD_FLUSH_EXT;
5157 else
5158 cmd = ATA_CMD_FLUSH;
5159
977e6b9f
TH
5160 err_mask = ata_do_simple_cmd(dev, cmd);
5161 if (err_mask) {
5162 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5163 return -EIO;
5164 }
5165
5166 return 0;
9b847548
JA
5167}
5168
cca3974e
JG
5169static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5170 unsigned int action, unsigned int ehi_flags,
5171 int wait)
500530f6
TH
5172{
5173 unsigned long flags;
5174 int i, rc;
5175
cca3974e
JG
5176 for (i = 0; i < host->n_ports; i++) {
5177 struct ata_port *ap = host->ports[i];
500530f6
TH
5178
5179 /* Previous resume operation might still be in
5180 * progress. Wait for PM_PENDING to clear.
5181 */
5182 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5183 ata_port_wait_eh(ap);
5184 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5185 }
5186
5187 /* request PM ops to EH */
5188 spin_lock_irqsave(ap->lock, flags);
5189
5190 ap->pm_mesg = mesg;
5191 if (wait) {
5192 rc = 0;
5193 ap->pm_result = &rc;
5194 }
5195
5196 ap->pflags |= ATA_PFLAG_PM_PENDING;
5197 ap->eh_info.action |= action;
5198 ap->eh_info.flags |= ehi_flags;
5199
5200 ata_port_schedule_eh(ap);
5201
5202 spin_unlock_irqrestore(ap->lock, flags);
5203
5204 /* wait and check result */
5205 if (wait) {
5206 ata_port_wait_eh(ap);
5207 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5208 if (rc)
5209 return rc;
5210 }
5211 }
5212
5213 return 0;
5214}
5215
5216/**
cca3974e
JG
5217 * ata_host_suspend - suspend host
5218 * @host: host to suspend
500530f6
TH
5219 * @mesg: PM message
5220 *
cca3974e 5221 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5222 * function requests EH to perform PM operations and waits for EH
5223 * to finish.
5224 *
5225 * LOCKING:
5226 * Kernel thread context (may sleep).
5227 *
5228 * RETURNS:
5229 * 0 on success, -errno on failure.
5230 */
cca3974e 5231int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6
TH
5232{
5233 int i, j, rc;
5234
cca3974e 5235 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
500530f6
TH
5236 if (rc)
5237 goto fail;
5238
5239 /* EH is quiescent now. Fail if we have any ready device.
5240 * This happens if hotplug occurs between completion of device
5241 * suspension and here.
5242 */
cca3974e
JG
5243 for (i = 0; i < host->n_ports; i++) {
5244 struct ata_port *ap = host->ports[i];
500530f6
TH
5245
5246 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5247 struct ata_device *dev = &ap->device[j];
5248
5249 if (ata_dev_ready(dev)) {
5250 ata_port_printk(ap, KERN_WARNING,
5251 "suspend failed, device %d "
5252 "still active\n", dev->devno);
5253 rc = -EBUSY;
5254 goto fail;
5255 }
5256 }
5257 }
5258
cca3974e 5259 host->dev->power.power_state = mesg;
500530f6
TH
5260 return 0;
5261
5262 fail:
cca3974e 5263 ata_host_resume(host);
500530f6
TH
5264 return rc;
5265}
5266
5267/**
cca3974e
JG
5268 * ata_host_resume - resume host
5269 * @host: host to resume
500530f6 5270 *
cca3974e 5271 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5272 * function requests EH to perform PM operations and returns.
5273 * Note that all resume operations are performed parallely.
5274 *
5275 * LOCKING:
5276 * Kernel thread context (may sleep).
5277 */
cca3974e 5278void ata_host_resume(struct ata_host *host)
500530f6 5279{
cca3974e
JG
5280 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5281 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5282 host->dev->power.power_state = PMSG_ON;
500530f6
TH
5283}
5284
c893a3ae
RD
5285/**
5286 * ata_port_start - Set port up for dma.
5287 * @ap: Port to initialize
5288 *
5289 * Called just after data structures for each port are
5290 * initialized. Allocates space for PRD table.
5291 *
5292 * May be used as the port_start() entry in ata_port_operations.
5293 *
5294 * LOCKING:
5295 * Inherited from caller.
5296 */
5297
1da177e4
LT
5298int ata_port_start (struct ata_port *ap)
5299{
2f1f610b 5300 struct device *dev = ap->dev;
6037d6bb 5301 int rc;
1da177e4
LT
5302
5303 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
5304 if (!ap->prd)
5305 return -ENOMEM;
5306
6037d6bb
JG
5307 rc = ata_pad_alloc(ap, dev);
5308 if (rc) {
cedc9a47 5309 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
6037d6bb 5310 return rc;
cedc9a47
JG
5311 }
5312
1da177e4
LT
5313 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
5314
5315 return 0;
5316}
5317
0baab86b
EF
5318
5319/**
5320 * ata_port_stop - Undo ata_port_start()
5321 * @ap: Port to shut down
5322 *
5323 * Frees the PRD table.
5324 *
5325 * May be used as the port_stop() entry in ata_port_operations.
5326 *
5327 * LOCKING:
6f0ef4fa 5328 * Inherited from caller.
0baab86b
EF
5329 */
5330
1da177e4
LT
5331void ata_port_stop (struct ata_port *ap)
5332{
2f1f610b 5333 struct device *dev = ap->dev;
1da177e4
LT
5334
5335 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
6037d6bb 5336 ata_pad_free(ap, dev);
1da177e4
LT
5337}
5338
cca3974e 5339void ata_host_stop (struct ata_host *host)
aa8f0dc6 5340{
cca3974e
JG
5341 if (host->mmio_base)
5342 iounmap(host->mmio_base);
aa8f0dc6
JG
5343}
5344
3ef3b43d
TH
5345/**
5346 * ata_dev_init - Initialize an ata_device structure
5347 * @dev: Device structure to initialize
5348 *
5349 * Initialize @dev in preparation for probing.
5350 *
5351 * LOCKING:
5352 * Inherited from caller.
5353 */
5354void ata_dev_init(struct ata_device *dev)
5355{
5356 struct ata_port *ap = dev->ap;
72fa4b74
TH
5357 unsigned long flags;
5358
5a04bf4b
TH
5359 /* SATA spd limit is bound to the first device */
5360 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5361
72fa4b74
TH
5362 /* High bits of dev->flags are used to record warm plug
5363 * requests which occur asynchronously. Synchronize using
cca3974e 5364 * host lock.
72fa4b74 5365 */
ba6a1308 5366 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5367 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5368 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5369
72fa4b74
TH
5370 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5371 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5372 dev->pio_mask = UINT_MAX;
5373 dev->mwdma_mask = UINT_MAX;
5374 dev->udma_mask = UINT_MAX;
5375}
5376
1da177e4 5377/**
155a8a9c 5378 * ata_port_init - Initialize an ata_port structure
1da177e4 5379 * @ap: Structure to initialize
cca3974e 5380 * @host: Collection of hosts to which @ap belongs
1da177e4
LT
5381 * @ent: Probe information provided by low-level driver
5382 * @port_no: Port number associated with this ata_port
5383 *
155a8a9c 5384 * Initialize a new ata_port structure.
0cba632b 5385 *
1da177e4 5386 * LOCKING:
0cba632b 5387 * Inherited from caller.
1da177e4 5388 */
cca3974e 5389void ata_port_init(struct ata_port *ap, struct ata_host *host,
155a8a9c 5390 const struct ata_probe_ent *ent, unsigned int port_no)
1da177e4
LT
5391{
5392 unsigned int i;
5393
cca3974e 5394 ap->lock = &host->lock;
198e0fed 5395 ap->flags = ATA_FLAG_DISABLED;
155a8a9c 5396 ap->id = ata_unique_id++;
1da177e4 5397 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5398 ap->host = host;
2f1f610b 5399 ap->dev = ent->dev;
1da177e4 5400 ap->port_no = port_no;
fea63e38
TH
5401 if (port_no == 1 && ent->pinfo2) {
5402 ap->pio_mask = ent->pinfo2->pio_mask;
5403 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5404 ap->udma_mask = ent->pinfo2->udma_mask;
5405 ap->flags |= ent->pinfo2->flags;
5406 ap->ops = ent->pinfo2->port_ops;
5407 } else {
5408 ap->pio_mask = ent->pio_mask;
5409 ap->mwdma_mask = ent->mwdma_mask;
5410 ap->udma_mask = ent->udma_mask;
5411 ap->flags |= ent->port_flags;
5412 ap->ops = ent->port_ops;
5413 }
5a04bf4b 5414 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
5415 ap->active_tag = ATA_TAG_POISON;
5416 ap->last_ctl = 0xFF;
bd5d825c
BP
5417
5418#if defined(ATA_VERBOSE_DEBUG)
5419 /* turn on all debugging levels */
5420 ap->msg_enable = 0x00FF;
5421#elif defined(ATA_DEBUG)
5422 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5423#else
0dd4b21f 5424 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5425#endif
1da177e4 5426
86e45b6b 5427 INIT_WORK(&ap->port_task, NULL, NULL);
580b2102 5428 INIT_WORK(&ap->hotplug_task, ata_scsi_hotplug, ap);
3057ac3c 5429 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan, ap);
a72ec4ce 5430 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5431 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 5432
838df628
TH
5433 /* set cable type */
5434 ap->cbl = ATA_CBL_NONE;
5435 if (ap->flags & ATA_FLAG_SATA)
5436 ap->cbl = ATA_CBL_SATA;
5437
acf356b1
TH
5438 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5439 struct ata_device *dev = &ap->device[i];
38d87234 5440 dev->ap = ap;
72fa4b74 5441 dev->devno = i;
3ef3b43d 5442 ata_dev_init(dev);
acf356b1 5443 }
1da177e4
LT
5444
5445#ifdef ATA_IRQ_TRAP
5446 ap->stats.unhandled_irq = 1;
5447 ap->stats.idle_irq = 1;
5448#endif
5449
5450 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5451}
5452
155a8a9c 5453/**
4608c160
TH
5454 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5455 * @ap: ATA port to initialize SCSI host for
5456 * @shost: SCSI host associated with @ap
155a8a9c 5457 *
4608c160 5458 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5459 *
5460 * LOCKING:
5461 * Inherited from caller.
5462 */
4608c160 5463static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5464{
cca3974e 5465 ap->scsi_host = shost;
155a8a9c 5466
4608c160
TH
5467 shost->unique_id = ap->id;
5468 shost->max_id = 16;
5469 shost->max_lun = 1;
5470 shost->max_channel = 1;
5471 shost->max_cmd_len = 12;
155a8a9c
BK
5472}
5473
1da177e4 5474/**
996139f1 5475 * ata_port_add - Attach low-level ATA driver to system
1da177e4 5476 * @ent: Information provided by low-level driver
cca3974e 5477 * @host: Collections of ports to which we add
1da177e4
LT
5478 * @port_no: Port number associated with this host
5479 *
0cba632b
JG
5480 * Attach low-level ATA driver to system.
5481 *
1da177e4 5482 * LOCKING:
0cba632b 5483 * PCI/etc. bus probe sem.
1da177e4
LT
5484 *
5485 * RETURNS:
0cba632b 5486 * New ata_port on success, for NULL on error.
1da177e4 5487 */
996139f1 5488static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
cca3974e 5489 struct ata_host *host,
1da177e4
LT
5490 unsigned int port_no)
5491{
996139f1 5492 struct Scsi_Host *shost;
1da177e4 5493 struct ata_port *ap;
1da177e4
LT
5494
5495 DPRINTK("ENTER\n");
aec5c3c1 5496
52783c5d 5497 if (!ent->port_ops->error_handler &&
cca3974e 5498 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
aec5c3c1
TH
5499 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5500 port_no);
5501 return NULL;
5502 }
5503
996139f1
JG
5504 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5505 if (!shost)
1da177e4
LT
5506 return NULL;
5507
996139f1 5508 shost->transportt = &ata_scsi_transport_template;
30afc84c 5509
996139f1 5510 ap = ata_shost_to_port(shost);
1da177e4 5511
cca3974e 5512 ata_port_init(ap, host, ent, port_no);
996139f1 5513 ata_port_init_shost(ap, shost);
1da177e4 5514
1da177e4 5515 return ap;
1da177e4
LT
5516}
5517
b03732f0 5518/**
cca3974e
JG
5519 * ata_sas_host_init - Initialize a host struct
5520 * @host: host to initialize
5521 * @dev: device host is attached to
5522 * @flags: host flags
5523 * @ops: port_ops
b03732f0
BK
5524 *
5525 * LOCKING:
5526 * PCI/etc. bus probe sem.
5527 *
5528 */
5529
cca3974e
JG
5530void ata_host_init(struct ata_host *host, struct device *dev,
5531 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5532{
cca3974e
JG
5533 spin_lock_init(&host->lock);
5534 host->dev = dev;
5535 host->flags = flags;
5536 host->ops = ops;
b03732f0
BK
5537}
5538
1da177e4 5539/**
0cba632b
JG
5540 * ata_device_add - Register hardware device with ATA and SCSI layers
5541 * @ent: Probe information describing hardware device to be registered
5542 *
5543 * This function processes the information provided in the probe
5544 * information struct @ent, allocates the necessary ATA and SCSI
5545 * host information structures, initializes them, and registers
5546 * everything with requisite kernel subsystems.
5547 *
5548 * This function requests irqs, probes the ATA bus, and probes
5549 * the SCSI bus.
1da177e4
LT
5550 *
5551 * LOCKING:
0cba632b 5552 * PCI/etc. bus probe sem.
1da177e4
LT
5553 *
5554 * RETURNS:
0cba632b 5555 * Number of ports registered. Zero on error (no ports registered).
1da177e4 5556 */
057ace5e 5557int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 5558{
6d0500df 5559 unsigned int i;
1da177e4 5560 struct device *dev = ent->dev;
cca3974e 5561 struct ata_host *host;
39b07ce6 5562 int rc;
1da177e4
LT
5563
5564 DPRINTK("ENTER\n");
02f076aa
AC
5565
5566 if (ent->irq == 0) {
5567 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5568 return 0;
5569 }
1da177e4 5570 /* alloc a container for our list of ATA ports (buses) */
cca3974e
JG
5571 host = kzalloc(sizeof(struct ata_host) +
5572 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
5573 if (!host)
1da177e4 5574 return 0;
1da177e4 5575
cca3974e
JG
5576 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5577 host->n_ports = ent->n_ports;
5578 host->irq = ent->irq;
5579 host->irq2 = ent->irq2;
5580 host->mmio_base = ent->mmio_base;
5581 host->private_data = ent->private_data;
1da177e4
LT
5582
5583 /* register each port bound to this device */
cca3974e 5584 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5585 struct ata_port *ap;
5586 unsigned long xfer_mode_mask;
2ec7df04 5587 int irq_line = ent->irq;
1da177e4 5588
cca3974e 5589 ap = ata_port_add(ent, host, i);
c38778c3 5590 host->ports[i] = ap;
1da177e4
LT
5591 if (!ap)
5592 goto err_out;
5593
dd5b06c4
TH
5594 /* dummy? */
5595 if (ent->dummy_port_mask & (1 << i)) {
5596 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5597 ap->ops = &ata_dummy_port_ops;
5598 continue;
5599 }
5600
5601 /* start port */
5602 rc = ap->ops->port_start(ap);
5603 if (rc) {
cca3974e
JG
5604 host->ports[i] = NULL;
5605 scsi_host_put(ap->scsi_host);
dd5b06c4
TH
5606 goto err_out;
5607 }
5608
2ec7df04
AC
5609 /* Report the secondary IRQ for second channel legacy */
5610 if (i == 1 && ent->irq2)
5611 irq_line = ent->irq2;
5612
1da177e4
LT
5613 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
5614 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5615 (ap->pio_mask << ATA_SHIFT_PIO);
5616
5617 /* print per-port info to dmesg */
f15a1daf 5618 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
2ec7df04 5619 "ctl 0x%lX bmdma 0x%lX irq %d\n",
f15a1daf
TH
5620 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5621 ata_mode_string(xfer_mode_mask),
5622 ap->ioaddr.cmd_addr,
5623 ap->ioaddr.ctl_addr,
5624 ap->ioaddr.bmdma_addr,
2ec7df04 5625 irq_line);
1da177e4
LT
5626
5627 ata_chk_status(ap);
cca3974e 5628 host->ops->irq_clear(ap);
e3180499 5629 ata_eh_freeze_port(ap); /* freeze port before requesting IRQ */
1da177e4
LT
5630 }
5631
2ec7df04 5632 /* obtain irq, that may be shared between channels */
39b07ce6 5633 rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
cca3974e 5634 DRV_NAME, host);
39b07ce6
JG
5635 if (rc) {
5636 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5637 ent->irq, rc);
1da177e4 5638 goto err_out;
39b07ce6 5639 }
1da177e4 5640
2ec7df04
AC
5641 /* do we have a second IRQ for the other channel, eg legacy mode */
5642 if (ent->irq2) {
5643 /* We will get weird core code crashes later if this is true
5644 so trap it now */
5645 BUG_ON(ent->irq == ent->irq2);
5646
5647 rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
cca3974e 5648 DRV_NAME, host);
2ec7df04
AC
5649 if (rc) {
5650 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5651 ent->irq2, rc);
5652 goto err_out_free_irq;
5653 }
5654 }
5655
1da177e4
LT
5656 /* perform each probe synchronously */
5657 DPRINTK("probe begin\n");
cca3974e
JG
5658 for (i = 0; i < host->n_ports; i++) {
5659 struct ata_port *ap = host->ports[i];
5a04bf4b 5660 u32 scontrol;
1da177e4
LT
5661 int rc;
5662
5a04bf4b
TH
5663 /* init sata_spd_limit to the current value */
5664 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5665 int spd = (scontrol >> 4) & 0xf;
5666 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5667 }
5668 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5669
cca3974e 5670 rc = scsi_add_host(ap->scsi_host, dev);
1da177e4 5671 if (rc) {
f15a1daf 5672 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
1da177e4
LT
5673 /* FIXME: do something useful here */
5674 /* FIXME: handle unconditional calls to
5675 * scsi_scan_host and ata_host_remove, below,
5676 * at the very least
5677 */
5678 }
3e706399 5679
52783c5d 5680 if (ap->ops->error_handler) {
1cdaf534 5681 struct ata_eh_info *ehi = &ap->eh_info;
3e706399
TH
5682 unsigned long flags;
5683
5684 ata_port_probe(ap);
5685
5686 /* kick EH for boot probing */
ba6a1308 5687 spin_lock_irqsave(ap->lock, flags);
3e706399 5688
1cdaf534
TH
5689 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5690 ehi->action |= ATA_EH_SOFTRESET;
5691 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
3e706399 5692
b51e9e5d 5693 ap->pflags |= ATA_PFLAG_LOADING;
3e706399
TH
5694 ata_port_schedule_eh(ap);
5695
ba6a1308 5696 spin_unlock_irqrestore(ap->lock, flags);
3e706399
TH
5697
5698 /* wait for EH to finish */
5699 ata_port_wait_eh(ap);
5700 } else {
5701 DPRINTK("ata%u: bus probe begin\n", ap->id);
5702 rc = ata_bus_probe(ap);
5703 DPRINTK("ata%u: bus probe end\n", ap->id);
5704
5705 if (rc) {
5706 /* FIXME: do something useful here?
5707 * Current libata behavior will
5708 * tear down everything when
5709 * the module is removed
5710 * or the h/w is unplugged.
5711 */
5712 }
5713 }
1da177e4
LT
5714 }
5715
5716 /* probes are done, now scan each port's disk(s) */
c893a3ae 5717 DPRINTK("host probe begin\n");
cca3974e
JG
5718 for (i = 0; i < host->n_ports; i++) {
5719 struct ata_port *ap = host->ports[i];
1da177e4 5720
644dd0cc 5721 ata_scsi_scan_host(ap);
1da177e4
LT
5722 }
5723
cca3974e 5724 dev_set_drvdata(dev, host);
1da177e4
LT
5725
5726 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5727 return ent->n_ports; /* success */
5728
2ec7df04 5729err_out_free_irq:
cca3974e 5730 free_irq(ent->irq, host);
1da177e4 5731err_out:
cca3974e
JG
5732 for (i = 0; i < host->n_ports; i++) {
5733 struct ata_port *ap = host->ports[i];
77f3f879
TH
5734 if (ap) {
5735 ap->ops->port_stop(ap);
cca3974e 5736 scsi_host_put(ap->scsi_host);
77f3f879 5737 }
1da177e4 5738 }
6d0500df 5739
cca3974e 5740 kfree(host);
1da177e4
LT
5741 VPRINTK("EXIT, returning 0\n");
5742 return 0;
5743}
5744
720ba126
TH
5745/**
5746 * ata_port_detach - Detach ATA port in prepration of device removal
5747 * @ap: ATA port to be detached
5748 *
5749 * Detach all ATA devices and the associated SCSI devices of @ap;
5750 * then, remove the associated SCSI host. @ap is guaranteed to
5751 * be quiescent on return from this function.
5752 *
5753 * LOCKING:
5754 * Kernel thread context (may sleep).
5755 */
5756void ata_port_detach(struct ata_port *ap)
5757{
5758 unsigned long flags;
5759 int i;
5760
5761 if (!ap->ops->error_handler)
c3cf30a9 5762 goto skip_eh;
720ba126
TH
5763
5764 /* tell EH we're leaving & flush EH */
ba6a1308 5765 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 5766 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 5767 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5768
5769 ata_port_wait_eh(ap);
5770
5771 /* EH is now guaranteed to see UNLOADING, so no new device
5772 * will be attached. Disable all existing devices.
5773 */
ba6a1308 5774 spin_lock_irqsave(ap->lock, flags);
720ba126
TH
5775
5776 for (i = 0; i < ATA_MAX_DEVICES; i++)
5777 ata_dev_disable(&ap->device[i]);
5778
ba6a1308 5779 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5780
5781 /* Final freeze & EH. All in-flight commands are aborted. EH
5782 * will be skipped and retrials will be terminated with bad
5783 * target.
5784 */
ba6a1308 5785 spin_lock_irqsave(ap->lock, flags);
720ba126 5786 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 5787 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5788
5789 ata_port_wait_eh(ap);
5790
5791 /* Flush hotplug task. The sequence is similar to
5792 * ata_port_flush_task().
5793 */
5794 flush_workqueue(ata_aux_wq);
5795 cancel_delayed_work(&ap->hotplug_task);
5796 flush_workqueue(ata_aux_wq);
5797
c3cf30a9 5798 skip_eh:
720ba126 5799 /* remove the associated SCSI host */
cca3974e 5800 scsi_remove_host(ap->scsi_host);
720ba126
TH
5801}
5802
17b14451 5803/**
cca3974e
JG
5804 * ata_host_remove - PCI layer callback for device removal
5805 * @host: ATA host set that was removed
17b14451 5806 *
2e9edbf8 5807 * Unregister all objects associated with this host set. Free those
17b14451
AC
5808 * objects.
5809 *
5810 * LOCKING:
5811 * Inherited from calling layer (may sleep).
5812 */
5813
cca3974e 5814void ata_host_remove(struct ata_host *host)
17b14451 5815{
17b14451
AC
5816 unsigned int i;
5817
cca3974e
JG
5818 for (i = 0; i < host->n_ports; i++)
5819 ata_port_detach(host->ports[i]);
17b14451 5820
cca3974e
JG
5821 free_irq(host->irq, host);
5822 if (host->irq2)
5823 free_irq(host->irq2, host);
17b14451 5824
cca3974e
JG
5825 for (i = 0; i < host->n_ports; i++) {
5826 struct ata_port *ap = host->ports[i];
17b14451 5827
cca3974e 5828 ata_scsi_release(ap->scsi_host);
17b14451
AC
5829
5830 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
5831 struct ata_ioports *ioaddr = &ap->ioaddr;
5832
2ec7df04
AC
5833 /* FIXME: Add -ac IDE pci mods to remove these special cases */
5834 if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
5835 release_region(ATA_PRIMARY_CMD, 8);
5836 else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
5837 release_region(ATA_SECONDARY_CMD, 8);
17b14451
AC
5838 }
5839
cca3974e 5840 scsi_host_put(ap->scsi_host);
17b14451
AC
5841 }
5842
cca3974e
JG
5843 if (host->ops->host_stop)
5844 host->ops->host_stop(host);
17b14451 5845
cca3974e 5846 kfree(host);
17b14451
AC
5847}
5848
1da177e4
LT
5849/**
5850 * ata_scsi_release - SCSI layer callback hook for host unload
4f931374 5851 * @shost: libata host to be unloaded
1da177e4
LT
5852 *
5853 * Performs all duties necessary to shut down a libata port...
5854 * Kill port kthread, disable port, and release resources.
5855 *
5856 * LOCKING:
5857 * Inherited from SCSI layer.
5858 *
5859 * RETURNS:
5860 * One.
5861 */
5862
cca3974e 5863int ata_scsi_release(struct Scsi_Host *shost)
1da177e4 5864{
cca3974e 5865 struct ata_port *ap = ata_shost_to_port(shost);
1da177e4
LT
5866
5867 DPRINTK("ENTER\n");
5868
5869 ap->ops->port_disable(ap);
6543bc07 5870 ap->ops->port_stop(ap);
1da177e4
LT
5871
5872 DPRINTK("EXIT\n");
5873 return 1;
5874}
5875
f6d950e2
BK
5876struct ata_probe_ent *
5877ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5878{
5879 struct ata_probe_ent *probe_ent;
5880
5881 probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
5882 if (!probe_ent) {
5883 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5884 kobject_name(&(dev->kobj)));
5885 return NULL;
5886 }
5887
5888 INIT_LIST_HEAD(&probe_ent->node);
5889 probe_ent->dev = dev;
5890
5891 probe_ent->sht = port->sht;
cca3974e 5892 probe_ent->port_flags = port->flags;
f6d950e2
BK
5893 probe_ent->pio_mask = port->pio_mask;
5894 probe_ent->mwdma_mask = port->mwdma_mask;
5895 probe_ent->udma_mask = port->udma_mask;
5896 probe_ent->port_ops = port->port_ops;
d639ca94 5897 probe_ent->private_data = port->private_data;
f6d950e2
BK
5898
5899 return probe_ent;
5900}
5901
1da177e4
LT
5902/**
5903 * ata_std_ports - initialize ioaddr with standard port offsets.
5904 * @ioaddr: IO address structure to be initialized
0baab86b
EF
5905 *
5906 * Utility function which initializes data_addr, error_addr,
5907 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5908 * device_addr, status_addr, and command_addr to standard offsets
5909 * relative to cmd_addr.
5910 *
5911 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 5912 */
0baab86b 5913
1da177e4
LT
5914void ata_std_ports(struct ata_ioports *ioaddr)
5915{
5916 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5917 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5918 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5919 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5920 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5921 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5922 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5923 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5924 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5925 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5926}
5927
0baab86b 5928
374b1873
JG
5929#ifdef CONFIG_PCI
5930
cca3974e 5931void ata_pci_host_stop (struct ata_host *host)
374b1873 5932{
cca3974e 5933 struct pci_dev *pdev = to_pci_dev(host->dev);
374b1873 5934
cca3974e 5935 pci_iounmap(pdev, host->mmio_base);
374b1873
JG
5936}
5937
1da177e4
LT
5938/**
5939 * ata_pci_remove_one - PCI layer callback for device removal
5940 * @pdev: PCI device that was removed
5941 *
5942 * PCI layer indicates to libata via this hook that
6f0ef4fa 5943 * hot-unplug or module unload event has occurred.
1da177e4
LT
5944 * Handle this by unregistering all objects associated
5945 * with this PCI device. Free those objects. Then finally
5946 * release PCI resources and disable device.
5947 *
5948 * LOCKING:
5949 * Inherited from PCI layer (may sleep).
5950 */
5951
5952void ata_pci_remove_one (struct pci_dev *pdev)
5953{
5954 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 5955 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 5956
cca3974e 5957 ata_host_remove(host);
f0eb62b8 5958
1da177e4
LT
5959 pci_release_regions(pdev);
5960 pci_disable_device(pdev);
5961 dev_set_drvdata(dev, NULL);
5962}
5963
5964/* move to PCI subsystem */
057ace5e 5965int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5966{
5967 unsigned long tmp = 0;
5968
5969 switch (bits->width) {
5970 case 1: {
5971 u8 tmp8 = 0;
5972 pci_read_config_byte(pdev, bits->reg, &tmp8);
5973 tmp = tmp8;
5974 break;
5975 }
5976 case 2: {
5977 u16 tmp16 = 0;
5978 pci_read_config_word(pdev, bits->reg, &tmp16);
5979 tmp = tmp16;
5980 break;
5981 }
5982 case 4: {
5983 u32 tmp32 = 0;
5984 pci_read_config_dword(pdev, bits->reg, &tmp32);
5985 tmp = tmp32;
5986 break;
5987 }
5988
5989 default:
5990 return -EINVAL;
5991 }
5992
5993 tmp &= bits->mask;
5994
5995 return (tmp == bits->val) ? 1 : 0;
5996}
9b847548 5997
3c5100c1 5998void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
5999{
6000 pci_save_state(pdev);
500530f6 6001
3c5100c1 6002 if (mesg.event == PM_EVENT_SUSPEND) {
500530f6
TH
6003 pci_disable_device(pdev);
6004 pci_set_power_state(pdev, PCI_D3hot);
6005 }
9b847548
JA
6006}
6007
500530f6 6008void ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548
JA
6009{
6010 pci_set_power_state(pdev, PCI_D0);
6011 pci_restore_state(pdev);
6012 pci_enable_device(pdev);
6013 pci_set_master(pdev);
500530f6
TH
6014}
6015
3c5100c1 6016int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6017{
cca3974e 6018 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6019 int rc = 0;
6020
cca3974e 6021 rc = ata_host_suspend(host, mesg);
500530f6
TH
6022 if (rc)
6023 return rc;
6024
3c5100c1 6025 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6026
6027 return 0;
6028}
6029
6030int ata_pci_device_resume(struct pci_dev *pdev)
6031{
cca3974e 6032 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6033
6034 ata_pci_device_do_resume(pdev);
cca3974e 6035 ata_host_resume(host);
9b847548
JA
6036 return 0;
6037}
1da177e4
LT
6038#endif /* CONFIG_PCI */
6039
6040
1da177e4
LT
6041static int __init ata_init(void)
6042{
a8601e5f 6043 ata_probe_timeout *= HZ;
1da177e4
LT
6044 ata_wq = create_workqueue("ata");
6045 if (!ata_wq)
6046 return -ENOMEM;
6047
453b07ac
TH
6048 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6049 if (!ata_aux_wq) {
6050 destroy_workqueue(ata_wq);
6051 return -ENOMEM;
6052 }
6053
1da177e4
LT
6054 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6055 return 0;
6056}
6057
6058static void __exit ata_exit(void)
6059{
6060 destroy_workqueue(ata_wq);
453b07ac 6061 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6062}
6063
a4625085 6064subsys_initcall(ata_init);
1da177e4
LT
6065module_exit(ata_exit);
6066
67846b30 6067static unsigned long ratelimit_time;
34af946a 6068static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6069
6070int ata_ratelimit(void)
6071{
6072 int rc;
6073 unsigned long flags;
6074
6075 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6076
6077 if (time_after(jiffies, ratelimit_time)) {
6078 rc = 1;
6079 ratelimit_time = jiffies + (HZ/5);
6080 } else
6081 rc = 0;
6082
6083 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6084
6085 return rc;
6086}
6087
c22daff4
TH
6088/**
6089 * ata_wait_register - wait until register value changes
6090 * @reg: IO-mapped register
6091 * @mask: Mask to apply to read register value
6092 * @val: Wait condition
6093 * @interval_msec: polling interval in milliseconds
6094 * @timeout_msec: timeout in milliseconds
6095 *
6096 * Waiting for some bits of register to change is a common
6097 * operation for ATA controllers. This function reads 32bit LE
6098 * IO-mapped register @reg and tests for the following condition.
6099 *
6100 * (*@reg & mask) != val
6101 *
6102 * If the condition is met, it returns; otherwise, the process is
6103 * repeated after @interval_msec until timeout.
6104 *
6105 * LOCKING:
6106 * Kernel thread context (may sleep)
6107 *
6108 * RETURNS:
6109 * The final register value.
6110 */
6111u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6112 unsigned long interval_msec,
6113 unsigned long timeout_msec)
6114{
6115 unsigned long timeout;
6116 u32 tmp;
6117
6118 tmp = ioread32(reg);
6119
6120 /* Calculate timeout _after_ the first read to make sure
6121 * preceding writes reach the controller before starting to
6122 * eat away the timeout.
6123 */
6124 timeout = jiffies + (timeout_msec * HZ) / 1000;
6125
6126 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6127 msleep(interval_msec);
6128 tmp = ioread32(reg);
6129 }
6130
6131 return tmp;
6132}
6133
dd5b06c4
TH
6134/*
6135 * Dummy port_ops
6136 */
6137static void ata_dummy_noret(struct ata_port *ap) { }
6138static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6139static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6140
6141static u8 ata_dummy_check_status(struct ata_port *ap)
6142{
6143 return ATA_DRDY;
6144}
6145
6146static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6147{
6148 return AC_ERR_SYSTEM;
6149}
6150
6151const struct ata_port_operations ata_dummy_port_ops = {
6152 .port_disable = ata_port_disable,
6153 .check_status = ata_dummy_check_status,
6154 .check_altstatus = ata_dummy_check_status,
6155 .dev_select = ata_noop_dev_select,
6156 .qc_prep = ata_noop_qc_prep,
6157 .qc_issue = ata_dummy_qc_issue,
6158 .freeze = ata_dummy_noret,
6159 .thaw = ata_dummy_noret,
6160 .error_handler = ata_dummy_noret,
6161 .post_internal_cmd = ata_dummy_qc_noret,
6162 .irq_clear = ata_dummy_noret,
6163 .port_start = ata_dummy_ret0,
6164 .port_stop = ata_dummy_noret,
6165};
6166
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);