/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		    Please ALWAYS copy linux-ide@vger.kernel.org
 *		    on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
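
/*
 * Index math for the table above (derived from the table layout and the
 * lookup in ata_rwcmd_protocol() below): entries 0-7 are PIO multi-sector
 * commands, 8-15 PIO single-sector, 16-23 DMA.  Within each group of eight,
 * FUA adds 4, LBA48 adds 2 and a write adds 1.  For example, an LBA48 FUA
 * DMA write selects entry 16 + 4 + 2 + 1 = 23, ATA_CMD_WRITE_FUA_EXT.  A
 * zero entry marks an unsupported combination, which ata_rwcmd_protocol()
 * reports by returning -1.
 */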

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
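
/*
 * Worked example for the CHS branch above (geometry illustrative): with
 * dev->heads = 16 and dev->sectors = 63, a taskfile addressing cylinder 2,
 * head 1, sector 1 decodes to (2 * 16 + 1) * 63 + 1 = 2080.
 */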

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if ((dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
			   ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ &&
	    likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

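		/* Per the FPDMA command layout, the queue tag rides in
		 * bits 7:3 of the sector count register while the 16-bit
		 * transfer count moves into the feature registers.
		 */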
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}
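
/*
 * Example round trip (mask values illustrative): packing pio_mask 0x1f
 * with zero MWDMA/UDMA masks simply shifts it into the PIO field of the
 * packed word, and ata_unpack_xfermask() below recovers the original
 * three masks; the actual shift and mask constants come from
 * <linux/ata.h>.
 */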

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
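
/*
 * Lookup example (derived from the table above): if the highest set bit
 * of an xfer_mask falls 5 bits into the UDMA field, ata_xfer_mask2mode()
 * below returns XFER_UDMA_0 + 5, i.e. XFER_UDMA_5, which ata_mode_string()
 * renders as "UDMA/100"; ata_xfer_mode2mask() inverts that mapping.
 */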

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		dev->class++;
	}
}

/**
 *	ata_pio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_mmio_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	Dispatch ATA device presence detection, depending
 *	on whether we are using PIO or MMIO to talk to the
 *	ATA shadow registers.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

static unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
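
/*
 * Example: an IDENTIFY word of 0x4142 emits 'A' (the high byte) and then
 * 'B' (the low byte), so the characters stored big-endian within each
 * 16-bit word come out in the right order regardless of host endianness.
 */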

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
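
/*
 * Usage sketch (buffer size illustrative): pulling the 40-character model
 * string, which starts at IDENTIFY word 27, would look like
 *
 *	unsigned char model[41];
 *	ata_id_c_string(id, model, 27, sizeof(model));
 *
 * which trims the trailing space padding and NUL-terminates the result.
 */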

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
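
/*
 * The word offsets used above, from the IDENTIFY DEVICE layout: words
 * 100-103 hold the 48-bit LBA capacity, words 60-61 the 28-bit LBA
 * capacity, words 57-58 the current CHS capacity, and words 1/3/6 the
 * default number of cylinders, heads and sectors per track.
 */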

/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}

/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
				"device %u, wait %u\n", ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = id[ATA_ID_OLD_PIO_MODES] & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However, it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway.
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user (low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nsect = buflen / ATA_SECT_SIZE;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if (qc->flags & ATA_QCFLAG_FAILED && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				       "zero err_mask for failed "
				       "internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, buflen);

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, &sg, 1);
}

/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

1389
1bc4ccff
AC
1390/**
1391 * ata_pio_need_iordy - check if iordy needed
1392 * @adev: ATA device
1393 *
1394 * Check if the current speed of the device requires IORDY. Used
1395 * by various controllers for chip configuration.
1396 */
1397
1398unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1399{
1400 int pio;
1401 int speed = adev->pio_mode - XFER_PIO_0;
1402
1403 if (speed < 2)
1404 return 0;
1405 if (speed > 2)
1406 return 1;
2e9edbf8 1407
1bc4ccff
AC
1408 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1409
1410 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1411 pio = adev->id[ATA_ID_EIDE_PIO];
1412 /* Is the speed faster than the drive allows non IORDY ? */
1413 if (pio) {
1414 /* This is cycle times not frequency - watch the logic! */
1415 if (pio > 240) /* PIO2 is 240nS per cycle */
1416 return 1;
1417 return 0;
1418 }
1419 }
1420 return 0;
1421}
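
/*
 * Example of the cycle-time check above (drive numbers illustrative): a
 * drive reporting a 383 ns minimum cycle without flow control cannot keep
 * up with PIO2's 240 ns cycle on its own, so IORDY is required; a drive
 * reporting 240 ns or less can run PIO2 without it.
 */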

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;
	tf.flags |= ATA_TFLAG_POLLING; /* for polling presence detection */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
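
/*
 * Example output (depths illustrative): a drive advertising queue depth
 * 32 behind a host that can only queue 31 commands is described as
 * "NCQ (depth 31/32)"; when the host depth covers the drive's, the
 * simpler "NCQ (depth 32)" form is used.
 */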

static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
	int i;

	if (ap->scsi_host) {
		unsigned int len = 0;

		for (i = 0; i < ATA_MAX_DEVICES; i++)
			len = max(len, ap->device[i].cdb_len);

		ap->scsi_host->max_cmd_len = len;
	}
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "ata%u: device %u supports DRM "
					       "functions and may not be fully "
					       "accessible.\n",
					       ap->id, dev->devno);
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "%s, "
					       "max %s, %Lu sectors: %s %s\n",
					       revbuf,
					       ata_mode_string(xfer_mask),
					       (unsigned long long)dev->n_sectors,
					       lba_desc, ncq_desc);
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO, "%s, "
					       "max %s, %Lu sectors: CHS %u/%u/%u\n",
					       revbuf,
					       ata_mode_string(xfer_mask),
					       (unsigned long long)dev->n_sectors,
					       dev->cylinders, dev->heads,
					       dev->sectors);
		}

		if (dev->id[59] & 0x100) {
			dev->multi_count = dev->id[59] & 0xff;
			if (ata_msg_drv(ap) && print_info)
				ata_dev_printk(dev, KERN_INFO,
					       "ata%u: dev %u multi count %u\n",
					       ap->id, dev->devno, dev->multi_count);
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc, down_xfermask;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	down_xfermask = 0;

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc) {
		down_xfermask = 1;
		goto fail;
	}

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	switch (rc) {
	case -EINVAL:
	case -ENODEV:
		tries[dev->devno] = 0;
		break;
	case -EIO:
		sata_down_spd_limit(ap);
		/* fall through */
	default:
		tries[dev->devno]--;
		if (down_xfermask &&
		    ata_down_xfermask_limit(dev, tries[dev->devno] == 1))
			tries[dev->devno] = 0;
	}

	if (!tries[dev->devno]) {
		ata_down_xfermask_limit(dev, 1);
		ata_dev_disable(dev);
	}

	goto retry;
}
1904
1905/**
0cba632b
JG
1906 * ata_port_probe - Mark port as enabled
1907 * @ap: Port for which we indicate enablement
1da177e4 1908 *
0cba632b
JG
1909 * Modify @ap data structure such that the system
1910 * thinks that the entire port is enabled.
1911 *
cca3974e 1912 * LOCKING: host lock, or some other form of
0cba632b 1913 * serialization.
1da177e4
LT
1914 */
1915
1916void ata_port_probe(struct ata_port *ap)
1917{
198e0fed 1918 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1919}
1920
3be680b7
TH
1921/**
1922 * sata_print_link_status - Print SATA link status
1923 * @ap: SATA port whose link status is to be printed
1924 *
1925 * This function prints link speed and status of a SATA link.
1926 *
1927 * LOCKING:
1928 * None.
1929 */
1930static void sata_print_link_status(struct ata_port *ap)
1931{
6d5f9732 1932 u32 sstatus, scontrol, tmp;
3be680b7 1933
81952c54 1934 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1935 return;
81952c54 1936 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1937
81952c54 1938 if (ata_port_online(ap)) {
3be680b7 1939 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1940 ata_port_printk(ap, KERN_INFO,
1941 "SATA link up %s (SStatus %X SControl %X)\n",
1942 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1943 } else {
f15a1daf
TH
1944 ata_port_printk(ap, KERN_INFO,
1945 "SATA link down (SStatus %X SControl %X)\n",
1946 sstatus, scontrol);
3be680b7
TH
1947 }
1948}
1949
1da177e4 1950/**
780a87f7
JG
1951 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1952 * @ap: SATA port associated with target SATA PHY.
1da177e4 1953 *
780a87f7
JG
1954 * This function issues commands to standard SATA Sxxx
1955 * PHY registers, to wake up the phy (and device), and
1956 * clear any reset condition.
1da177e4
LT
1957 *
1958 * LOCKING:
0cba632b 1959 * PCI/etc. bus probe sem.
1da177e4
LT
1960 *
1961 */
1962void __sata_phy_reset(struct ata_port *ap)
1963{
1964 u32 sstatus;
1965 unsigned long timeout = jiffies + (HZ * 5);
1966
1967 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 1968 /* issue phy wake/reset */
81952c54 1969 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
1970 /* Couldn't find anything in SATA I/II specs, but
1971 * AHCI-1.1 10.4.2 says at least 1 ms. */
1972 mdelay(1);
1da177e4 1973 }
81952c54
TH
1974 /* phy wake/clear reset */
1975 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
1976
1977 /* wait for phy to become ready, if necessary */
1978 do {
1979 msleep(200);
81952c54 1980 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
1981 if ((sstatus & 0xf) != 1)
1982 break;
1983 } while (time_before(jiffies, timeout));
1984
3be680b7
TH
1985 /* print link status */
1986 sata_print_link_status(ap);
656563e3 1987
3be680b7 1988 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 1989 if (!ata_port_offline(ap))
1da177e4 1990 ata_port_probe(ap);
3be680b7 1991 else
1da177e4 1992 ata_port_disable(ap);
1da177e4 1993
198e0fed 1994 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1995 return;
1996
1997 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
1998 ata_port_disable(ap);
1999 return;
2000 }
2001
2002 ap->cbl = ATA_CBL_SATA;
2003}
2004
2005/**
780a87f7
JG
2006 * sata_phy_reset - Reset SATA bus.
2007 * @ap: SATA port associated with target SATA PHY.
1da177e4 2008 *
780a87f7
JG
2009 * This function resets the SATA bus, and then probes
2010 * the bus for devices.
1da177e4
LT
2011 *
2012 * LOCKING:
0cba632b 2013 * PCI/etc. bus probe sem.
1da177e4
LT
2014 *
2015 */
2016void sata_phy_reset(struct ata_port *ap)
2017{
2018 __sata_phy_reset(ap);
198e0fed 2019 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2020 return;
2021 ata_bus_reset(ap);
2022}
2023
ebdfca6e
AC
2024/**
2025 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2026 * @adev: device
2027 *
2028 * Obtain the other device on the same cable, or NULL if
2029 * none is present.
2030 */
2e9edbf8 2031
3373efd8 2032struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2033{
3373efd8 2034 struct ata_port *ap = adev->ap;
ebdfca6e 2035 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2036 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2037 return NULL;
2038 return pair;
2039}
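/*
 * Illustrative usage (sketch): ata_dev_xfermask() later in this file
 * relies on this helper to detect a shared cable, along the lines of
 *
 *	if (ata_dev_pair(dev)) {
 *		... both devices present: mask off the CFA-only
 *		    PIO5/6 and MWDMA3/4 timings ...
 *	}
 */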
2040
1da177e4 2041/**
780a87f7
JG
2042 * ata_port_disable - Disable port.
2043 * @ap: Port to be disabled.
1da177e4 2044 *
780a87f7
JG
2045 * Modify @ap data structure such that the system
2046 * thinks that the entire port is disabled, and should
2047 * never attempt to probe or communicate with devices
2048 * on this port.
2049 *
cca3974e 2050 * LOCKING: host lock, or some other form of
780a87f7 2051 * serialization.
1da177e4
LT
2052 */
2053
2054void ata_port_disable(struct ata_port *ap)
2055{
2056 ap->device[0].class = ATA_DEV_NONE;
2057 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2058 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2059}
2060
1c3fae4d 2061/**
3c567b7d 2062 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2063 * @ap: Port to adjust SATA spd limit for
2064 *
2065 * Adjust SATA spd limit of @ap downward. Note that this
2066 * function only adjusts the limit. The change must be applied
3c567b7d 2067 * using sata_set_spd().
1c3fae4d
TH
2068 *
2069 * LOCKING:
2070 * Inherited from caller.
2071 *
2072 * RETURNS:
2073 * 0 on success, negative errno on failure
2074 */
3c567b7d 2075int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2076{
81952c54
TH
2077 u32 sstatus, spd, mask;
2078 int rc, highbit;
1c3fae4d 2079
81952c54
TH
2080 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2081 if (rc)
2082 return rc;
1c3fae4d
TH
2083
2084 mask = ap->sata_spd_limit;
2085 if (mask <= 1)
2086 return -EINVAL;
2087 highbit = fls(mask) - 1;
2088 mask &= ~(1 << highbit);
2089
81952c54 2090 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2091 if (spd <= 1)
2092 return -EINVAL;
2093 spd--;
2094 mask &= (1 << spd) - 1;
2095 if (!mask)
2096 return -EINVAL;
2097
2098 ap->sata_spd_limit = mask;
2099
f15a1daf
TH
2100 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2101 sata_spd_string(fls(mask)));
1c3fae4d
TH
2102
2103 return 0;
2104}
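/*
 * Illustrative sketch: as noted above, the lowered limit only takes
 * effect once it is written to SControl and the link renegotiated.
 * A recovery path might therefore follow up with, e.g.,
 *
 *	if (sata_down_spd_limit(ap) == 0 && sata_set_spd_needed(ap))
 *		ehc->i.action |= ATA_EH_HARDRESET;
 *
 * where ehc is the port's ata_eh_context, as in ata_std_prereset().
 */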
2105
3c567b7d 2106static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2107{
2108 u32 spd, limit;
2109
2110 if (ap->sata_spd_limit == UINT_MAX)
2111 limit = 0;
2112 else
2113 limit = fls(ap->sata_spd_limit);
2114
2115 spd = (*scontrol >> 4) & 0xf;
2116 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2117
2118 return spd != limit;
2119}
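/*
 * Background note (summary of the SATA register layout): in SControl,
 * DET occupies bits 3:0, SPD bits 7:4 and IPM bits 11:8; SStatus uses
 * the same layout, which is why DET and SPD are extracted with
 * "& 0xf" and "(val >> 4) & 0xf" throughout this file.  The 0x3xx
 * constants written elsewhere thus mean IPM=3 (partial and slumber
 * power states disallowed) combined with DET=0 (no action), DET=1
 * (issue COMRESET) or DET=4 (take the PHY offline).
 */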
2120
2121/**
3c567b7d 2122 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2123 * @ap: Port in question
2124 *
2125 * Test whether the spd limit in SControl matches
2126 * @ap->sata_spd_limit. This function is used to determine
2127 * whether hardreset is necessary to apply SATA spd
2128 * configuration.
2129 *
2130 * LOCKING:
2131 * Inherited from caller.
2132 *
2133 * RETURNS:
2134 * 1 if SATA spd configuration is needed, 0 otherwise.
2135 */
3c567b7d 2136int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2137{
2138 u32 scontrol;
2139
81952c54 2140 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2141 return 0;
2142
3c567b7d 2143 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2144}
2145
2146/**
3c567b7d 2147 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2148 * @ap: Port to set SATA spd for
2149 *
2150 * Set SATA spd of @ap according to sata_spd_limit.
2151 *
2152 * LOCKING:
2153 * Inherited from caller.
2154 *
2155 * RETURNS:
2156 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2157 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2158 */
3c567b7d 2159int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2160{
2161 u32 scontrol;
81952c54 2162 int rc;
1c3fae4d 2163
81952c54
TH
2164 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2165 return rc;
1c3fae4d 2166
3c567b7d 2167 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2168 return 0;
2169
81952c54
TH
2170 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2171 return rc;
2172
1c3fae4d
TH
2173 return 1;
2174}
2175
452503f9
AC
2176/*
2177 * This mode timing computation functionality is ported over from
2178 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2179 */
2180/*
b352e57d 2181 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2182 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2183 * for UDMA6, which is currently supported only by Maxtor drives.
2184 *
2185 * For PIO 5/6 and MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2186 */
2187
2188static const struct ata_timing ata_timing[] = {
2189
2190 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2191 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2192 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2193 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2194
b352e57d
AC
2195 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2196 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2197 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2198 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2199 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2200
2201/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2202
452503f9
AC
2203 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2204 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2205 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2206
452503f9
AC
2207 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2208 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2209 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2210
b352e57d
AC
2211 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2212 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2213 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2214 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2215
2216 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2217 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2218 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2219
2220/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2221
2222 { 0xFF }
2223};
2224
2225#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2226#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2227
2228static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2229{
2230 q->setup = EZ(t->setup * 1000, T);
2231 q->act8b = EZ(t->act8b * 1000, T);
2232 q->rec8b = EZ(t->rec8b * 1000, T);
2233 q->cyc8b = EZ(t->cyc8b * 1000, T);
2234 q->active = EZ(t->active * 1000, T);
2235 q->recover = EZ(t->recover * 1000, T);
2236 q->cycle = EZ(t->cycle * 1000, T);
2237 q->udma = EZ(t->udma * 1000, UT);
2238}
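/*
 * Worked example: ENOUGH() is ceiling division and EZ() additionally
 * maps an unspecified (zero) field to zero.  Assuming the caller
 * passes the bus clock period T in picoseconds, as libata PATA
 * drivers do (T = 1000000000/33333, roughly 30003, for a 33 MHz
 * clock), the 70 ns PIO-0 setup time quantizes to
 * ENOUGH(70 * 1000, 30003) = 69999/30003 + 1 = 3 clock cycles.
 */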
2239
2240void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2241 struct ata_timing *m, unsigned int what)
2242{
2243 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2244 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2245 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2246 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2247 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2248 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2249 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2250 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2251}
2252
2253static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2254{
2255 const struct ata_timing *t;
2256
2257 for (t = ata_timing; t->mode != speed; t++)
91190758 2258 if (t->mode == 0xFF)
452503f9 2259 return NULL;
2e9edbf8 2260 return t;
452503f9
AC
2261}
2262
2263int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2264 struct ata_timing *t, int T, int UT)
2265{
2266 const struct ata_timing *s;
2267 struct ata_timing p;
2268
2269 /*
2e9edbf8 2270 * Find the mode.
75b1f2f8 2271 */
452503f9
AC
2272
2273 if (!(s = ata_timing_find_mode(speed)))
2274 return -EINVAL;
2275
75b1f2f8
AL
2276 memcpy(t, s, sizeof(*s));
2277
452503f9
AC
2278 /*
2279 * If the drive is an EIDE drive, it can tell us it needs extended
2280 * PIO/MW_DMA cycle timing.
2281 */
2282
2283 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2284 memset(&p, 0, sizeof(p));
2285 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2286 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2287 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2288 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2289 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2290 }
2291 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2292 }
2293
2294 /*
2295 * Convert the timing to bus clock counts.
2296 */
2297
75b1f2f8 2298 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2299
2300 /*
c893a3ae
RD
2301 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2302 * S.M.A.R.T. and some other commands. We have to ensure that the
2303 * DMA cycle timing is slower than or equal to the fastest PIO timing.
452503f9
AC
2304 */
2305
2306 if (speed > XFER_PIO_4) {
2307 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2308 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2309 }
2310
2311 /*
c893a3ae 2312 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2313 */
2314
2315 if (t->act8b + t->rec8b < t->cyc8b) {
2316 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2317 t->rec8b = t->cyc8b - t->act8b;
2318 }
2319
2320 if (t->active + t->recover < t->cycle) {
2321 t->active += (t->cycle - (t->active + t->recover)) / 2;
2322 t->recover = t->cycle - t->active;
2323 }
2324
2325 return 0;
2326}
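/*
 * Worked example for the lengthening step above: with act8b = 2,
 * rec8b = 1 and cyc8b = 5 clocks, the shortfall of 2 is split evenly,
 * so act8b becomes 3 and rec8b becomes 5 - 3 = 2, making active plus
 * recovery add up to the full 8-bit cycle time.
 */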
2327
cf176e1a
TH
2328/**
2329 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a
TH
2330 * @dev: Device to adjust xfer masks
2331 * @force_pio0: Force PIO0
2332 *
2333 * Adjust xfer masks of @dev downward. Note that this function
2334 * does not apply the change. Invoking ata_set_mode() afterwards
2335 * will apply the limit.
2336 *
2337 * LOCKING:
2338 * Inherited from caller.
2339 *
2340 * RETURNS:
2341 * 0 on success, negative errno on failure
2342 */
3373efd8 2343int ata_down_xfermask_limit(struct ata_device *dev, int force_pio0)
cf176e1a
TH
2344{
2345 unsigned long xfer_mask;
2346 int highbit;
2347
2348 xfer_mask = ata_pack_xfermask(dev->pio_mask, dev->mwdma_mask,
2349 dev->udma_mask);
2350
2351 if (!xfer_mask)
2352 goto fail;
2353 /* don't gear down to MWDMA from UDMA, go directly to PIO */
2354 if (xfer_mask & ATA_MASK_UDMA)
2355 xfer_mask &= ~ATA_MASK_MWDMA;
2356
2357 highbit = fls(xfer_mask) - 1;
2358 xfer_mask &= ~(1 << highbit);
2359 if (force_pio0)
2360 xfer_mask &= 1 << ATA_SHIFT_PIO;
2361 if (!xfer_mask)
2362 goto fail;
2363
2364 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2365 &dev->udma_mask);
2366
f15a1daf
TH
2367 ata_dev_printk(dev, KERN_WARNING, "limiting speed to %s\n",
2368 ata_mode_string(xfer_mask));
cf176e1a
TH
2369
2370 return 0;
2371
2372 fail:
2373 return -EINVAL;
2374}
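/*
 * Illustrative sketch: per the kernel-doc above, the lowered mask is
 * applied by a later ata_set_mode() call, e.g.
 *
 *	if (ata_down_xfermask_limit(dev, 0) == 0)
 *		rc = ata_set_mode(ap, &failed_dev);
 *
 * with failed_dev being a hypothetical struct ata_device pointer
 * used as the out parameter.
 */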
2375
3373efd8 2376static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2377{
baa1e78a 2378 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2379 unsigned int err_mask;
2380 int rc;
1da177e4 2381
e8384607 2382 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2383 if (dev->xfer_shift == ATA_SHIFT_PIO)
2384 dev->flags |= ATA_DFLAG_PIO;
2385
3373efd8 2386 err_mask = ata_dev_set_xfermode(dev);
83206a29 2387 if (err_mask) {
f15a1daf
TH
2388 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2389 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2390 return -EIO;
2391 }
1da177e4 2392
baa1e78a 2393 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2394 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2395 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2396 if (rc)
83206a29 2397 return rc;
48a8a14f 2398
23e71c3d
TH
2399 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2400 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2401
f15a1daf
TH
2402 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2403 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2404 return 0;
1da177e4
LT
2405}
2406
1da177e4
LT
2407/**
2408 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2409 * @ap: port on which timings will be programmed
e82cbdb9 2410 * @r_failed_dev: out parameter for failed device
1da177e4 2411 *
e82cbdb9
TH
2412 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2413 * ata_set_mode() fails, pointer to the failing device is
2414 * returned in @r_failed_dev.
780a87f7 2415 *
1da177e4 2416 * LOCKING:
0cba632b 2417 * PCI/etc. bus probe sem.
e82cbdb9
TH
2418 *
2419 * RETURNS:
2420 * 0 on success, negative errno otherwise
1da177e4 2421 */
1ad8e7f9 2422int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2423{
e8e0619f 2424 struct ata_device *dev;
e82cbdb9 2425 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2426
3adcebb2
TH
2427 /* has private set_mode? */
2428 if (ap->ops->set_mode) {
2429 /* FIXME: make ->set_mode handle no device case and
2430 * return error code and failing device on failure.
2431 */
2432 for (i = 0; i < ATA_MAX_DEVICES; i++) {
02670bf3 2433 if (ata_dev_ready(&ap->device[i])) {
3adcebb2
TH
2434 ap->ops->set_mode(ap);
2435 break;
2436 }
2437 }
2438 return 0;
2439 }
2440
a6d5a51c
TH
2441 /* step 1: calculate xfer_mask */
2442 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2443 unsigned int pio_mask, dma_mask;
a6d5a51c 2444
e8e0619f
TH
2445 dev = &ap->device[i];
2446
e1211e3f 2447 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2448 continue;
2449
3373efd8 2450 ata_dev_xfermask(dev);
1da177e4 2451
acf356b1
TH
2452 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2453 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2454 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2455 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2456
4f65977d 2457 found = 1;
5444a6f4
AC
2458 if (dev->dma_mode)
2459 used_dma = 1;
a6d5a51c 2460 }
4f65977d 2461 if (!found)
e82cbdb9 2462 goto out;
a6d5a51c
TH
2463
2464 /* step 2: always set host PIO timings */
e8e0619f
TH
2465 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2466 dev = &ap->device[i];
2467 if (!ata_dev_enabled(dev))
2468 continue;
2469
2470 if (!dev->pio_mode) {
f15a1daf 2471 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2472 rc = -EINVAL;
e82cbdb9 2473 goto out;
e8e0619f
TH
2474 }
2475
2476 dev->xfer_mode = dev->pio_mode;
2477 dev->xfer_shift = ATA_SHIFT_PIO;
2478 if (ap->ops->set_piomode)
2479 ap->ops->set_piomode(ap, dev);
2480 }
1da177e4 2481
a6d5a51c 2482 /* step 3: set host DMA timings */
e8e0619f
TH
2483 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2484 dev = &ap->device[i];
2485
2486 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2487 continue;
2488
2489 dev->xfer_mode = dev->dma_mode;
2490 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2491 if (ap->ops->set_dmamode)
2492 ap->ops->set_dmamode(ap, dev);
2493 }
1da177e4
LT
2494
2495 /* step 4: update devices' xfer mode */
83206a29 2496 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2497 dev = &ap->device[i];
1da177e4 2498
02670bf3
TH
2499 /* don't update suspended devices' xfer mode */
2500 if (!ata_dev_ready(dev))
83206a29
TH
2501 continue;
2502
3373efd8 2503 rc = ata_dev_set_mode(dev);
5bbc53f4 2504 if (rc)
e82cbdb9 2505 goto out;
83206a29 2506 }
1da177e4 2507
e8e0619f
TH
2508 /* Record simplex status. If we selected DMA then the other
2509 * host channels are not permitted to do so.
5444a6f4 2510 */
cca3974e
JG
2511 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2512 ap->host->simplex_claimed = 1;
5444a6f4 2513
e8e0619f 2514 /* step 5: chip-specific finalisation */
1da177e4
LT
2515 if (ap->ops->post_set_mode)
2516 ap->ops->post_set_mode(ap);
2517
e82cbdb9
TH
2518 out:
2519 if (rc)
2520 *r_failed_dev = dev;
2521 return rc;
1da177e4
LT
2522}
2523
1fdffbce
JG
2524/**
2525 * ata_tf_to_host - issue ATA taskfile to host controller
2526 * @ap: port to which command is being issued
2527 * @tf: ATA taskfile register set
2528 *
2529 * Issues ATA taskfile register set to ATA host controller,
2530 * with proper synchronization with interrupt handler and
2531 * other threads.
2532 *
2533 * LOCKING:
cca3974e 2534 * spin_lock_irqsave(host lock)
1fdffbce
JG
2535 */
2536
2537static inline void ata_tf_to_host(struct ata_port *ap,
2538 const struct ata_taskfile *tf)
2539{
2540 ap->ops->tf_load(ap, tf);
2541 ap->ops->exec_command(ap, tf);
2542}
2543
1da177e4
LT
2544/**
2545 * ata_busy_sleep - sleep until BSY clears, or timeout
2546 * @ap: port containing status register to be polled
2547 * @tmout_pat: impatience timeout
2548 * @tmout: overall timeout
2549 *
780a87f7
JG
2550 * Sleep until ATA Status register bit BSY clears,
2551 * or a timeout occurs.
2552 *
d1adc1bb
TH
2553 * LOCKING:
2554 * Kernel thread context (may sleep).
2555 *
2556 * RETURNS:
2557 * 0 on success, -errno otherwise.
1da177e4 2558 */
d1adc1bb
TH
2559int ata_busy_sleep(struct ata_port *ap,
2560 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2561{
2562 unsigned long timer_start, timeout;
2563 u8 status;
2564
2565 status = ata_busy_wait(ap, ATA_BUSY, 300);
2566 timer_start = jiffies;
2567 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2568 while (status != 0xff && (status & ATA_BUSY) &&
2569 time_before(jiffies, timeout)) {
1da177e4
LT
2570 msleep(50);
2571 status = ata_busy_wait(ap, ATA_BUSY, 3);
2572 }
2573
d1adc1bb 2574 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2575 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2576 "port is slow to respond, please be patient "
2577 "(Status 0x%x)\n", status);
1da177e4
LT
2578
2579 timeout = timer_start + tmout;
d1adc1bb
TH
2580 while (status != 0xff && (status & ATA_BUSY) &&
2581 time_before(jiffies, timeout)) {
1da177e4
LT
2582 msleep(50);
2583 status = ata_chk_status(ap);
2584 }
2585
d1adc1bb
TH
2586 if (status == 0xff)
2587 return -ENODEV;
2588
1da177e4 2589 if (status & ATA_BUSY) {
f15a1daf 2590 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2591 "(%lu secs, Status 0x%x)\n",
2592 tmout / HZ, status);
d1adc1bb 2593 return -EBUSY;
1da177e4
LT
2594 }
2595
2596 return 0;
2597}
2598
2599static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2600{
2601 struct ata_ioports *ioaddr = &ap->ioaddr;
2602 unsigned int dev0 = devmask & (1 << 0);
2603 unsigned int dev1 = devmask & (1 << 1);
2604 unsigned long timeout;
2605
2606 /* if device 0 was found in ata_devchk, wait for its
2607 * BSY bit to clear
2608 */
2609 if (dev0)
2610 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2611
2612 /* if device 1 was found in ata_devchk, wait for
2613 * register access, then wait for BSY to clear
2614 */
2615 timeout = jiffies + ATA_TMOUT_BOOT;
2616 while (dev1) {
2617 u8 nsect, lbal;
2618
2619 ap->ops->dev_select(ap, 1);
2620 if (ap->flags & ATA_FLAG_MMIO) {
2621 nsect = readb((void __iomem *) ioaddr->nsect_addr);
2622 lbal = readb((void __iomem *) ioaddr->lbal_addr);
2623 } else {
2624 nsect = inb(ioaddr->nsect_addr);
2625 lbal = inb(ioaddr->lbal_addr);
2626 }
2627 if ((nsect == 1) && (lbal == 1))
2628 break;
2629 if (time_after(jiffies, timeout)) {
2630 dev1 = 0;
2631 break;
2632 }
2633 msleep(50); /* give drive a breather */
2634 }
2635 if (dev1)
2636 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2637
2638 /* is all this really necessary? */
2639 ap->ops->dev_select(ap, 0);
2640 if (dev1)
2641 ap->ops->dev_select(ap, 1);
2642 if (dev0)
2643 ap->ops->dev_select(ap, 0);
2644}
2645
1da177e4
LT
2646static unsigned int ata_bus_softreset(struct ata_port *ap,
2647 unsigned int devmask)
2648{
2649 struct ata_ioports *ioaddr = &ap->ioaddr;
2650
2651 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2652
2653 /* software reset. causes dev0 to be selected */
2654 if (ap->flags & ATA_FLAG_MMIO) {
2655 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2656 udelay(20); /* FIXME: flush */
2657 writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
2658 udelay(20); /* FIXME: flush */
2659 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2660 } else {
2661 outb(ap->ctl, ioaddr->ctl_addr);
2662 udelay(10);
2663 outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2664 udelay(10);
2665 outb(ap->ctl, ioaddr->ctl_addr);
2666 }
2667
2668 /* spec mandates ">= 2ms" before checking status.
2669 * We wait 150ms, because that was the magic delay used for
2670 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2671 * between when the ATA command register is written, and then
2672 * status is checked. Because waiting for "a while" before
2673 * checking status is fine, post SRST, we perform this magic
2674 * delay here as well.
09c7ad79
AC
2675 *
2676 * Old drivers/ide uses the 2 ms rule and then waits for ready
1da177e4
LT
2677 */
2678 msleep(150);
2679
2e9edbf8 2680 /* Before we perform post-reset processing we want to see if
298a41ca
TH
2681 * the bus shows 0xFF because the odd clown forgets the D7
2682 * pulldown resistor.
2683 */
d1adc1bb
TH
2684 if (ata_check_status(ap) == 0xFF)
2685 return 0;
09c7ad79 2686
1da177e4
LT
2687 ata_bus_post_reset(ap, devmask);
2688
2689 return 0;
2690}
2691
2692/**
2693 * ata_bus_reset - reset host port and associated ATA channel
2694 * @ap: port to reset
2695 *
2696 * This is typically the first time we actually start issuing
2697 * commands to the ATA channel. We wait for BSY to clear, then
2698 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2699 * result. Determine what devices, if any, are on the channel
2700 * by looking at the device 0/1 error register. Look at the signature
2701 * stored in each device's taskfile registers, to determine if
2702 * the device is ATA or ATAPI.
2703 *
2704 * LOCKING:
0cba632b 2705 * PCI/etc. bus probe sem.
cca3974e 2706 * Obtains host lock.
1da177e4
LT
2707 *
2708 * SIDE EFFECTS:
198e0fed 2709 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2710 */
2711
2712void ata_bus_reset(struct ata_port *ap)
2713{
2714 struct ata_ioports *ioaddr = &ap->ioaddr;
2715 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2716 u8 err;
aec5c3c1 2717 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4
LT
2718
2719 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2720
2721 /* determine if device 0/1 are present */
2722 if (ap->flags & ATA_FLAG_SATA_RESET)
2723 dev0 = 1;
2724 else {
2725 dev0 = ata_devchk(ap, 0);
2726 if (slave_possible)
2727 dev1 = ata_devchk(ap, 1);
2728 }
2729
2730 if (dev0)
2731 devmask |= (1 << 0);
2732 if (dev1)
2733 devmask |= (1 << 1);
2734
2735 /* select device 0 again */
2736 ap->ops->dev_select(ap, 0);
2737
2738 /* issue bus reset */
2739 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
2740 if (ata_bus_softreset(ap, devmask))
2741 goto err_out;
1da177e4
LT
2742
2743 /*
2744 * determine by signature whether we have ATA or ATAPI devices
2745 */
b4dc7623 2746 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 2747 if ((slave_possible) && (err != 0x81))
b4dc7623 2748 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
2749
2750 /* re-enable interrupts */
2751 if (ap->ioaddr.ctl_addr) /* FIXME: hack. create a hook instead */
2752 ata_irq_on(ap);
2753
2754 /* is double-select really necessary? */
2755 if (ap->device[1].class != ATA_DEV_NONE)
2756 ap->ops->dev_select(ap, 1);
2757 if (ap->device[0].class != ATA_DEV_NONE)
2758 ap->ops->dev_select(ap, 0);
2759
2760 /* if no devices were detected, disable this port */
2761 if ((ap->device[0].class == ATA_DEV_NONE) &&
2762 (ap->device[1].class == ATA_DEV_NONE))
2763 goto err_out;
2764
2765 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2766 /* set up device control for ATA_FLAG_SATA_RESET */
2767 if (ap->flags & ATA_FLAG_MMIO)
2768 writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
2769 else
2770 outb(ap->ctl, ioaddr->ctl_addr);
2771 }
2772
2773 DPRINTK("EXIT\n");
2774 return;
2775
2776err_out:
f15a1daf 2777 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
2778 ap->ops->port_disable(ap);
2779
2780 DPRINTK("EXIT\n");
2781}
2782
d7bb4cc7
TH
2783/**
2784 * sata_phy_debounce - debounce SATA phy status
2785 * @ap: ATA port to debounce SATA phy status for
2786 * @params: timing parameters { interval, duration, timeout } in msec
2787 *
2788 * Make sure SStatus of @ap reaches stable state, determined by
2789 * holding the same value where DET is not 1 for @duration polled
2790 * every @interval, before @timeout. The timeout constrains the
2791 * beginning of the stable state. Because, after hot unplugging,
2792 * DET gets stuck at 1 on some controllers, this function waits
2793 * until timeout and then returns 0 if DET is stable at 1.
2794 *
2795 * LOCKING:
2796 * Kernel thread context (may sleep)
2797 *
2798 * RETURNS:
2799 * 0 on success, -errno on failure.
2800 */
2801int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2802{
d7bb4cc7
TH
2803 unsigned long interval_msec = params[0];
2804 unsigned long duration = params[1] * HZ / 1000;
2805 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2806 unsigned long last_jiffies;
2807 u32 last, cur;
2808 int rc;
2809
2810 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2811 return rc;
2812 cur &= 0xf;
2813
2814 last = cur;
2815 last_jiffies = jiffies;
2816
2817 while (1) {
2818 msleep(interval_msec);
2819 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2820 return rc;
2821 cur &= 0xf;
2822
2823 /* DET stable? */
2824 if (cur == last) {
2825 if (cur == 1 && time_before(jiffies, timeout))
2826 continue;
2827 if (time_after(jiffies, last_jiffies + duration))
2828 return 0;
2829 continue;
2830 }
2831
2832 /* unstable, start over */
2833 last = cur;
2834 last_jiffies = jiffies;
2835
2836 /* check timeout */
2837 if (time_after(jiffies, timeout))
2838 return -EBUSY;
2839 }
2840}
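/*
 * Illustrative usage: callers pass one of the debounce timing tables
 * declared at the top of this file, e.g.
 *
 *	rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
 *
 * as ata_wait_spinup() below does after a hotplug event.
 */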
2841
2842/**
2843 * sata_phy_resume - resume SATA phy
2844 * @ap: ATA port to resume SATA phy for
2845 * @params: timing parameters { interval, duration, timeout } in msec
2846 *
2847 * Resume SATA phy of @ap and debounce it.
2848 *
2849 * LOCKING:
2850 * Kernel thread context (may sleep)
2851 *
2852 * RETURNS:
2853 * 0 on success, -errno on failure.
2854 */
2855int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2856{
2857 u32 scontrol;
81952c54
TH
2858 int rc;
2859
2860 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2861 return rc;
7a7921e8 2862
852ee16a 2863 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2864
2865 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2866 return rc;
7a7921e8 2867
d7bb4cc7
TH
2868 /* Some PHYs react badly if SStatus is pounded immediately
2869 * after resuming. Delay 200ms before debouncing.
2870 */
2871 msleep(200);
7a7921e8 2872
d7bb4cc7 2873 return sata_phy_debounce(ap, params);
7a7921e8
TH
2874}
2875
f5914a46
TH
2876static void ata_wait_spinup(struct ata_port *ap)
2877{
2878 struct ata_eh_context *ehc = &ap->eh_context;
2879 unsigned long end, secs;
2880 int rc;
2881
2882 /* first, debounce phy if SATA */
2883 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 2884 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
2885
2886 /* if debounced successfully and offline, no need to wait */
2887 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2888 return;
2889 }
2890
2891 /* okay, let's give the drive time to spin up */
2892 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2893 secs = ((end - jiffies) + HZ - 1) / HZ;
2894
2895 if (time_after(jiffies, end))
2896 return;
2897
2898 if (secs > 5)
2899 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2900 "(%lu secs)\n", secs);
2901
2902 schedule_timeout_uninterruptible(end - jiffies);
2903}
2904
2905/**
2906 * ata_std_prereset - prepare for reset
2907 * @ap: ATA port to be reset
2908 *
2909 * @ap is about to be reset. Initialize it.
2910 *
2911 * LOCKING:
2912 * Kernel thread context (may sleep)
2913 *
2914 * RETURNS:
2915 * 0 on success, -errno otherwise.
2916 */
2917int ata_std_prereset(struct ata_port *ap)
2918{
2919 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 2920 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
2921 int rc;
2922
28324304
TH
2923 /* handle link resume & hotplug spinup */
2924 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2925 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2926 ehc->i.action |= ATA_EH_HARDRESET;
2927
2928 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2929 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2930 ata_wait_spinup(ap);
f5914a46
TH
2931
2932 /* if we're about to do hardreset, nothing more to do */
2933 if (ehc->i.action & ATA_EH_HARDRESET)
2934 return 0;
2935
2936 /* if SATA, resume phy */
2937 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
2938 rc = sata_phy_resume(ap, timing);
2939 if (rc && rc != -EOPNOTSUPP) {
2940 /* phy resume failed */
2941 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2942 "link for reset (errno=%d)\n", rc);
2943 return rc;
2944 }
2945 }
2946
2947 /* Wait for !BSY if the controller can wait for the first D2H
2948 * Reg FIS and we don't know that no device is attached.
2949 */
2950 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2951 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2952
2953 return 0;
2954}
2955
c2bd5804
TH
2956/**
2957 * ata_std_softreset - reset host port via ATA SRST
2958 * @ap: port to reset
c2bd5804
TH
2959 * @classes: resulting classes of attached devices
2960 *
52783c5d 2961 * Reset host port using ATA SRST.
c2bd5804
TH
2962 *
2963 * LOCKING:
2964 * Kernel thread context (may sleep)
2965 *
2966 * RETURNS:
2967 * 0 on success, -errno otherwise.
2968 */
2bf2cb26 2969int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
2970{
2971 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2972 unsigned int devmask = 0, err_mask;
2973 u8 err;
2974
2975 DPRINTK("ENTER\n");
2976
81952c54 2977 if (ata_port_offline(ap)) {
3a39746a
TH
2978 classes[0] = ATA_DEV_NONE;
2979 goto out;
2980 }
2981
c2bd5804
TH
2982 /* determine if device 0/1 are present */
2983 if (ata_devchk(ap, 0))
2984 devmask |= (1 << 0);
2985 if (slave_possible && ata_devchk(ap, 1))
2986 devmask |= (1 << 1);
2987
c2bd5804
TH
2988 /* select device 0 again */
2989 ap->ops->dev_select(ap, 0);
2990
2991 /* issue bus reset */
2992 DPRINTK("about to softreset, devmask=%x\n", devmask);
2993 err_mask = ata_bus_softreset(ap, devmask);
2994 if (err_mask) {
f15a1daf
TH
2995 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2996 err_mask);
c2bd5804
TH
2997 return -EIO;
2998 }
2999
3000 /* determine by signature whether we have ATA or ATAPI devices */
3001 classes[0] = ata_dev_try_classify(ap, 0, &err);
3002 if (slave_possible && err != 0x81)
3003 classes[1] = ata_dev_try_classify(ap, 1, &err);
3004
3a39746a 3005 out:
c2bd5804
TH
3006 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3007 return 0;
3008}
3009
3010/**
b6103f6d 3011 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3012 * @ap: port to reset
b6103f6d 3013 * @timing: timing parameters { interval, duration, timeout } in msec
c2bd5804
TH
3014 *
3015 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3016 *
3017 * LOCKING:
3018 * Kernel thread context (may sleep)
3019 *
3020 * RETURNS:
3021 * 0 on success, -errno otherwise.
3022 */
b6103f6d 3023int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 3024{
852ee16a 3025 u32 scontrol;
81952c54 3026 int rc;
852ee16a 3027
c2bd5804
TH
3028 DPRINTK("ENTER\n");
3029
3c567b7d 3030 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3031 /* SATA spec says nothing about how to reconfigure
3032 * spd. To be on the safe side, turn off phy during
3033 * reconfiguration. This works for at least ICH7 AHCI
3034 * and Sil3124.
3035 */
81952c54 3036 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3037 goto out;
81952c54 3038
a34b6fc0 3039 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3040
3041 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3042 goto out;
1c3fae4d 3043
3c567b7d 3044 sata_set_spd(ap);
1c3fae4d
TH
3045 }
3046
3047 /* issue phy wake/reset */
81952c54 3048 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3049 goto out;
81952c54 3050
852ee16a 3051 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3052
3053 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3054 goto out;
c2bd5804 3055
1c3fae4d 3056 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3057 * 10.4.2 says at least 1 ms.
3058 */
3059 msleep(1);
3060
1c3fae4d 3061 /* bring phy back */
b6103f6d
TH
3062 rc = sata_phy_resume(ap, timing);
3063 out:
3064 DPRINTK("EXIT, rc=%d\n", rc);
3065 return rc;
3066}
3067
3068/**
3069 * sata_std_hardreset - reset host port via SATA phy reset
3070 * @ap: port to reset
3071 * @class: resulting class of attached device
3072 *
3073 * SATA phy-reset host port using DET bits of SControl register,
3074 * wait for !BSY and classify the attached device.
3075 *
3076 * LOCKING:
3077 * Kernel thread context (may sleep)
3078 *
3079 * RETURNS:
3080 * 0 on success, -errno otherwise.
3081 */
3082int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3083{
3084 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3085 int rc;
3086
3087 DPRINTK("ENTER\n");
3088
3089 /* do hardreset */
3090 rc = sata_port_hardreset(ap, timing);
3091 if (rc) {
3092 ata_port_printk(ap, KERN_ERR,
3093 "COMRESET failed (errno=%d)\n", rc);
3094 return rc;
3095 }
c2bd5804 3096
c2bd5804 3097 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3098 if (ata_port_offline(ap)) {
c2bd5804
TH
3099 *class = ATA_DEV_NONE;
3100 DPRINTK("EXIT, link offline\n");
3101 return 0;
3102 }
3103
3104 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3105 ata_port_printk(ap, KERN_ERR,
3106 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3107 return -EIO;
3108 }
3109
3a39746a
TH
3110 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3111
c2bd5804
TH
3112 *class = ata_dev_try_classify(ap, 0, NULL);
3113
3114 DPRINTK("EXIT, class=%u\n", *class);
3115 return 0;
3116}
3117
3118/**
3119 * ata_std_postreset - standard postreset callback
3120 * @ap: the target ata_port
3121 * @classes: classes of attached devices
3122 *
3123 * This function is invoked after a successful reset. Note that
3124 * the device might have been reset more than once using
3125 * different reset methods before postreset is invoked.
c2bd5804 3126 *
c2bd5804
TH
3127 * LOCKING:
3128 * Kernel thread context (may sleep)
3129 */
3130void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3131{
dc2b3515
TH
3132 u32 serror;
3133
c2bd5804
TH
3134 DPRINTK("ENTER\n");
3135
c2bd5804 3136 /* print link status */
81952c54 3137 sata_print_link_status(ap);
c2bd5804 3138
dc2b3515
TH
3139 /* clear SError */
3140 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3141 sata_scr_write(ap, SCR_ERROR, serror);
3142
3a39746a 3143 /* re-enable interrupts */
e3180499
TH
3144 if (!ap->ops->error_handler) {
3145 /* FIXME: hack. create a hook instead */
3146 if (ap->ioaddr.ctl_addr)
3147 ata_irq_on(ap);
3148 }
c2bd5804
TH
3149
3150 /* is double-select really necessary? */
3151 if (classes[0] != ATA_DEV_NONE)
3152 ap->ops->dev_select(ap, 1);
3153 if (classes[1] != ATA_DEV_NONE)
3154 ap->ops->dev_select(ap, 0);
3155
3a39746a
TH
3156 /* bail out if no device is present */
3157 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3158 DPRINTK("EXIT, no device\n");
3159 return;
3160 }
3161
3162 /* set up device control */
3163 if (ap->ioaddr.ctl_addr) {
3164 if (ap->flags & ATA_FLAG_MMIO)
3165 writeb(ap->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
3166 else
3167 outb(ap->ctl, ap->ioaddr.ctl_addr);
3168 }
c2bd5804
TH
3169
3170 DPRINTK("EXIT\n");
3171}
3172
623a3128
TH
3173/**
3174 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3175 * @dev: device to compare against
3176 * @new_class: class of the new device
3177 * @new_id: IDENTIFY page of the new device
3178 *
3179 * Compare @new_class and @new_id against @dev and determine
3180 * whether @dev is the device indicated by @new_class and
3181 * @new_id.
3182 *
3183 * LOCKING:
3184 * None.
3185 *
3186 * RETURNS:
3187 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3188 */
3373efd8
TH
3189static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3190 const u16 *new_id)
623a3128
TH
3191{
3192 const u16 *old_id = dev->id;
3193 unsigned char model[2][41], serial[2][21];
3194 u64 new_n_sectors;
3195
3196 if (dev->class != new_class) {
f15a1daf
TH
3197 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3198 dev->class, new_class);
623a3128
TH
3199 return 0;
3200 }
3201
3202 ata_id_c_string(old_id, model[0], ATA_ID_PROD_OFS, sizeof(model[0]));
3203 ata_id_c_string(new_id, model[1], ATA_ID_PROD_OFS, sizeof(model[1]));
3204 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO_OFS, sizeof(serial[0]));
3205 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO_OFS, sizeof(serial[1]));
3206 new_n_sectors = ata_id_n_sectors(new_id);
3207
3208 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3209 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3210 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3211 return 0;
3212 }
3213
3214 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3215 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3216 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3217 return 0;
3218 }
3219
3220 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3221 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3222 "%llu != %llu\n",
3223 (unsigned long long)dev->n_sectors,
3224 (unsigned long long)new_n_sectors);
623a3128
TH
3225 return 0;
3226 }
3227
3228 return 1;
3229}
3230
3231/**
3232 * ata_dev_revalidate - Revalidate ATA device
623a3128 3233 * @dev: device to revalidate
bff04647 3234 * @readid_flags: read ID flags
623a3128
TH
3235 *
3236 * Re-read IDENTIFY page and make sure @dev is still attached to
3237 * the port.
3238 *
3239 * LOCKING:
3240 * Kernel thread context (may sleep)
3241 *
3242 * RETURNS:
3243 * 0 on success, negative errno otherwise
3244 */
bff04647 3245int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3246{
5eb45c02 3247 unsigned int class = dev->class;
f15a1daf 3248 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3249 int rc;
3250
5eb45c02
TH
3251 if (!ata_dev_enabled(dev)) {
3252 rc = -ENODEV;
3253 goto fail;
3254 }
623a3128 3255
fe635c7e 3256 /* read ID data */
bff04647 3257 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3258 if (rc)
3259 goto fail;
3260
3261 /* is the device still there? */
3373efd8 3262 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3263 rc = -ENODEV;
3264 goto fail;
3265 }
3266
fe635c7e 3267 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3268
3269 /* configure device according to the new ID */
efdaedc4 3270 rc = ata_dev_configure(dev);
5eb45c02
TH
3271 if (rc == 0)
3272 return 0;
623a3128
TH
3273
3274 fail:
f15a1daf 3275 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3276 return rc;
3277}
3278
6919a0a6
AC
3279struct ata_blacklist_entry {
3280 const char *model_num;
3281 const char *model_rev;
3282 unsigned long horkage;
3283};
3284
3285static const struct ata_blacklist_entry ata_device_blacklist [] = {
3286 /* Devices with DMA related problems under Linux */
3287 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3288 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3289 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3290 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3291 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3292 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3293 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3294 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3295 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3296 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3297 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3298 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3299 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3300 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3301 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3302 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3303 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3304 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3305 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3306 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3307 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3308 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3309 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3310 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3311 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3312 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
3313 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3314 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3315 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3316 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3317
3318 /* Devices we expect to fail diagnostics */
3319
3320 /* Devices where NCQ should be avoided */
3321 /* NCQ is slow */
3322 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3323
3324 /* Devices with NCQ limits */
3325
3326 /* End Marker */
3327 { }
1da177e4 3328};
2e9edbf8 3329
f4b15fef
AC
3330static int ata_strim(char *s, size_t len)
3331{
3332 len = strnlen(s, len);
3333
3334 /* ATAPI specifies that empty space is blank-filled; remove blanks */
3335 while ((len > 0) && (s[len - 1] == ' ')) {
3336 len--;
3337 s[len] = 0;
3338 }
3339 return len;
3340}
1da177e4 3341
6919a0a6 3342unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3343{
f4b15fef
AC
3344 unsigned char model_num[40];
3345 unsigned char model_rev[16];
3346 unsigned int nlen, rlen;
6919a0a6 3347 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3348
f4b15fef
AC
3349 ata_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
3350 sizeof(model_num));
3351 ata_id_string(dev->id, model_rev, ATA_ID_FW_REV_OFS,
3352 sizeof(model_rev));
3353 nlen = ata_strim(model_num, sizeof(model_num));
3354 rlen = ata_strim(model_rev, sizeof(model_rev));
1da177e4 3355
6919a0a6
AC
3356 while (ad->model_num) {
3357 if (!strncmp(ad->model_num, model_num, nlen)) {
3358 if (ad->model_rev == NULL)
3359 return ad->horkage;
3360 if (!strncmp(ad->model_rev, model_rev, rlen))
3361 return ad->horkage;
f4b15fef 3362 }
6919a0a6 3363 ad++;
f4b15fef 3364 }
1da177e4
LT
3365 return 0;
3366}
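/*
 * Illustrative usage: callers test individual horkage bits, as
 * ata_dma_blacklisted() below does for ATA_HORKAGE_NODMA, e.g.
 *
 *	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ)
 *		... leave NCQ disabled for this device ...
 */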
3367
6919a0a6
AC
3368static int ata_dma_blacklisted(const struct ata_device *dev)
3369{
3370 /* We don't support polling DMA.
3371 * Blacklist DMA for ATAPI devices with CDB-intr (and use PIO)
3372 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3373 */
3374 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3375 (dev->flags & ATA_DFLAG_CDB_INTR))
3376 return 1;
3377 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3378}
3379
a6d5a51c
TH
3380/**
3381 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3382 * @dev: Device to compute xfermask for
3383 *
acf356b1
TH
3384 * Compute supported xfermask of @dev and store it in
3385 * dev->*_mask. This function is responsible for applying all
3386 * known limits including host controller limits, device
3387 * blacklist, etc...
a6d5a51c
TH
3388 *
3389 * LOCKING:
3390 * None.
a6d5a51c 3391 */
3373efd8 3392static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3393{
3373efd8 3394 struct ata_port *ap = dev->ap;
cca3974e 3395 struct ata_host *host = ap->host;
a6d5a51c 3396 unsigned long xfer_mask;
1da177e4 3397
37deecb5 3398 /* controller modes available */
565083e1
TH
3399 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3400 ap->mwdma_mask, ap->udma_mask);
3401
3402 /* Apply cable rule here. Don't apply it early because when
3403 * we handle hot plug the cable type can itself change.
3404 */
3405 if (ap->cbl == ATA_CBL_PATA40)
3406 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
fc085150
AC
3407 /* Apply the drive-side cable rule. Cables reported host side as
3408 * unknown or 80-pin are checked drive side as well. Cases where a
3409 * 40-wire cable is known to be safe at 80-wire speeds are not checked here.
3410 */
3411 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3412 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3413
1da177e4 3414
37deecb5
TH
3415 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3416 dev->mwdma_mask, dev->udma_mask);
3417 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3418
b352e57d
AC
3419 /*
3420 * CFA Advanced TrueIDE timings are not allowed on a shared
3421 * cable
3422 */
3423 if (ata_dev_pair(dev)) {
3424 /* No PIO5 or PIO6 */
3425 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3426 /* No MWDMA3 or MWDMA 4 */
3427 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3428 }
3429
37deecb5
TH
3430 if (ata_dma_blacklisted(dev)) {
3431 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3432 ata_dev_printk(dev, KERN_WARNING,
3433 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3434 }
a6d5a51c 3435
cca3974e 3436 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
37deecb5
TH
3437 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3438 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3439 "other device, disabling DMA\n");
5444a6f4 3440 }
565083e1 3441
5444a6f4
AC
3442 if (ap->ops->mode_filter)
3443 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3444
565083e1
TH
3445 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3446 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3447}
3448
1da177e4
LT
3449/**
3450 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3451 * @dev: Device to which command will be sent
3452 *
780a87f7
JG
3453 * Issue SET FEATURES - XFER MODE command to device @dev
3454 * on its port.
3455 *
1da177e4 3456 * LOCKING:
0cba632b 3457 * PCI/etc. bus probe sem.
83206a29
TH
3458 *
3459 * RETURNS:
3460 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3461 */
3462
3373efd8 3463static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3464{
a0123703 3465 struct ata_taskfile tf;
83206a29 3466 unsigned int err_mask;
1da177e4
LT
3467
3468 /* set up set-features taskfile */
3469 DPRINTK("set features - xfer mode\n");
3470
3373efd8 3471 ata_tf_init(dev, &tf);
a0123703
TH
3472 tf.command = ATA_CMD_SET_FEATURES;
3473 tf.feature = SETFEATURES_XFER;
3474 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3475 tf.protocol = ATA_PROT_NODATA;
3476 tf.nsect = dev->xfer_mode;
1da177e4 3477
3373efd8 3478 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3479
83206a29
TH
3480 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3481 return err_mask;
1da177e4
LT
3482}
3483
8bf62ece
AL
3484/**
3485 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3486 * @dev: Device to which command will be sent
e2a7f77a
RD
3487 * @heads: Number of heads (taskfile parameter)
3488 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3489 *
3490 * LOCKING:
6aff8f1f
TH
3491 * Kernel thread context (may sleep)
3492 *
3493 * RETURNS:
3494 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3495 */
3373efd8
TH
3496static unsigned int ata_dev_init_params(struct ata_device *dev,
3497 u16 heads, u16 sectors)
8bf62ece 3498{
a0123703 3499 struct ata_taskfile tf;
6aff8f1f 3500 unsigned int err_mask;
8bf62ece
AL
3501
3502 /* Number of sectors per track 1-255. Number of heads 1-16 */
3503 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3504 return AC_ERR_INVALID;
8bf62ece
AL
3505
3506 /* set up init dev params taskfile */
3507 DPRINTK("init dev params \n");
3508
3373efd8 3509 ata_tf_init(dev, &tf);
a0123703
TH
3510 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3511 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3512 tf.protocol = ATA_PROT_NODATA;
3513 tf.nsect = sectors;
3514 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3515
3373efd8 3516 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3517
6aff8f1f
TH
3518 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3519 return err_mask;
8bf62ece
AL
3520}
3521
1da177e4 3522/**
0cba632b
JG
3523 * ata_sg_clean - Unmap DMA memory associated with command
3524 * @qc: Command containing DMA memory to be released
3525 *
3526 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3527 *
3528 * LOCKING:
cca3974e 3529 * spin_lock_irqsave(host lock)
1da177e4 3530 */
70e6ad0c 3531void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3532{
3533 struct ata_port *ap = qc->ap;
cedc9a47 3534 struct scatterlist *sg = qc->__sg;
1da177e4 3535 int dir = qc->dma_dir;
cedc9a47 3536 void *pad_buf = NULL;
1da177e4 3537
a4631474
TH
3538 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3539 WARN_ON(sg == NULL);
1da177e4
LT
3540
3541 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3542 WARN_ON(qc->n_elem > 1);
1da177e4 3543
2c13b7ce 3544 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3545
cedc9a47
JG
3546 /* if we padded the buffer out to a 32-bit boundary, and data
3547 * xfer direction is from-device, we must copy from the
3548 * pad buffer back into the supplied buffer
3549 */
3550 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3551 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3552
3553 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3554 if (qc->n_elem)
2f1f610b 3555 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3556 /* restore last sg */
3557 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3558 if (pad_buf) {
3559 struct scatterlist *psg = &qc->pad_sgent;
3560 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3561 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3562 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3563 }
3564 } else {
2e242fa9 3565 if (qc->n_elem)
2f1f610b 3566 dma_unmap_single(ap->dev,
e1410f2d
JG
3567 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3568 dir);
cedc9a47
JG
3569 /* restore sg */
3570 sg->length += qc->pad_len;
3571 if (pad_buf)
3572 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3573 pad_buf, qc->pad_len);
3574 }
1da177e4
LT
3575
3576 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3577 qc->__sg = NULL;
1da177e4
LT
3578}
3579
3580/**
3581 * ata_fill_sg - Fill PCI IDE PRD table
3582 * @qc: Metadata associated with taskfile to be transferred
3583 *
780a87f7
JG
3584 * Fill PCI IDE PRD (scatter-gather) table with segments
3585 * associated with the current disk command.
3586 *
1da177e4 3587 * LOCKING:
cca3974e 3588 * spin_lock_irqsave(host lock)
1da177e4
LT
3589 *
3590 */
3591static void ata_fill_sg(struct ata_queued_cmd *qc)
3592{
1da177e4 3593 struct ata_port *ap = qc->ap;
cedc9a47
JG
3594 struct scatterlist *sg;
3595 unsigned int idx;
1da177e4 3596
a4631474 3597 WARN_ON(qc->__sg == NULL);
f131883e 3598 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3599
3600 idx = 0;
cedc9a47 3601 ata_for_each_sg(sg, qc) {
1da177e4
LT
3602 u32 addr, offset;
3603 u32 sg_len, len;
3604
3605 /* determine if physical DMA addr spans 64K boundary.
3606 * Note h/w doesn't support 64-bit, so we unconditionally
3607 * truncate dma_addr_t to u32.
3608 */
3609 addr = (u32) sg_dma_address(sg);
3610 sg_len = sg_dma_len(sg);
3611
3612 while (sg_len) {
3613 offset = addr & 0xffff;
3614 len = sg_len;
3615 if ((offset + sg_len) > 0x10000)
3616 len = 0x10000 - offset;
3617
3618 ap->prd[idx].addr = cpu_to_le32(addr);
3619 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3620 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3621
3622 idx++;
3623 sg_len -= len;
3624 addr += len;
3625 }
3626 }
3627
3628 if (idx)
3629 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3630}
3631/**
3632 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3633 * @qc: Metadata associated with taskfile to check
3634 *
780a87f7
JG
3635 * Allow low-level driver to filter ATA PACKET commands, returning
3636 * a status indicating whether or not it is OK to use DMA for the
3637 * supplied PACKET command.
3638 *
1da177e4 3639 * LOCKING:
cca3974e 3640 * spin_lock_irqsave(host lock)
0cba632b 3641 *
1da177e4
LT
3642 * RETURNS: 0 when ATAPI DMA can be used
3643 * nonzero otherwise
3644 */
3645int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3646{
3647 struct ata_port *ap = qc->ap;
3648 int rc = 0; /* Assume ATAPI DMA is OK by default */
3649
3650 if (ap->ops->check_atapi_dma)
3651 rc = ap->ops->check_atapi_dma(qc);
3652
3653 return rc;
3654}
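/* Sketch of a hypothetical ->check_atapi_dma hook (illustrative only;
 * example_check_atapi_dma is not a driver in this tree).  A controller
 * that cannot DMA odd-length ATAPI transfers could veto DMA here and
 * let libata fall back to PIO:
 *
 *	static int example_check_atapi_dma(struct ata_queued_cmd *qc)
 *	{
 *		return (qc->nbytes & 1) ? 1 : 0;   - nonzero means no DMA
 *	}
 */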
3655/**
3656 * ata_qc_prep - Prepare taskfile for submission
3657 * @qc: Metadata associated with taskfile to be prepared
3658 *
780a87f7
JG
3659 * Prepare ATA taskfile for submission.
3660 *
1da177e4 3661 * LOCKING:
cca3974e 3662 * spin_lock_irqsave(host lock)
1da177e4
LT
3663 */
3664void ata_qc_prep(struct ata_queued_cmd *qc)
3665{
3666 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3667 return;
3668
3669 ata_fill_sg(qc);
3670}
3671
e46834cd
BK
3672void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3673
0cba632b
JG
3674/**
3675 * ata_sg_init_one - Associate command with memory buffer
3676 * @qc: Command to be associated
3677 * @buf: Memory buffer
3678 * @buflen: Length of memory buffer, in bytes.
3679 *
3680 * Initialize the data-related elements of queued_cmd @qc
3681 * to point to a single memory buffer, @buf of byte length @buflen.
3682 *
3683 * LOCKING:
cca3974e 3684 * spin_lock_irqsave(host lock)
0cba632b
JG
3685 */
3686
1da177e4
LT
3687void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3688{
1da177e4
LT
3689 qc->flags |= ATA_QCFLAG_SINGLE;
3690
cedc9a47 3691 qc->__sg = &qc->sgent;
1da177e4 3692 qc->n_elem = 1;
cedc9a47 3693 qc->orig_n_elem = 1;
1da177e4 3694 qc->buf_virt = buf;
233277ca 3695 qc->nbytes = buflen;
1da177e4 3696
61c0596c 3697 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3698}
3699
0cba632b
JG
3700/**
3701 * ata_sg_init - Associate command with scatter-gather table.
3702 * @qc: Command to be associated
3703 * @sg: Scatter-gather table.
3704 * @n_elem: Number of elements in s/g table.
3705 *
3706 * Initialize the data-related elements of queued_cmd @qc
3707 * to point to a scatter-gather table @sg, containing @n_elem
3708 * elements.
3709 *
3710 * LOCKING:
cca3974e 3711 * spin_lock_irqsave(host lock)
0cba632b
JG
3712 */
3713
1da177e4
LT
3714void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3715 unsigned int n_elem)
3716{
3717 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3718 qc->__sg = sg;
1da177e4 3719 qc->n_elem = n_elem;
cedc9a47 3720 qc->orig_n_elem = n_elem;
1da177e4
LT
3721}
3722
3723/**
0cba632b
JG
3724 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3725 * @qc: Command with memory buffer to be mapped.
3726 *
3727 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
3728 *
3729 * LOCKING:
cca3974e 3730 * spin_lock_irqsave(host lock)
1da177e4
LT
3731 *
3732 * RETURNS:
0cba632b 3733 * Zero on success, negative on error.
1da177e4
LT
3734 */
3735
3736static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3737{
3738 struct ata_port *ap = qc->ap;
3739 int dir = qc->dma_dir;
cedc9a47 3740 struct scatterlist *sg = qc->__sg;
1da177e4 3741 dma_addr_t dma_address;
2e242fa9 3742 int trim_sg = 0;
1da177e4 3743
cedc9a47
JG
3744 /* we must lengthen transfers to end on a 32-bit boundary */
3745 qc->pad_len = sg->length & 3;
3746 if (qc->pad_len) {
3747 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3748 struct scatterlist *psg = &qc->pad_sgent;
3749
a4631474 3750 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3751
3752 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3753
3754 if (qc->tf.flags & ATA_TFLAG_WRITE)
3755 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3756 qc->pad_len);
3757
3758 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3759 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3760 /* trim sg */
3761 sg->length -= qc->pad_len;
2e242fa9
TH
3762 if (sg->length == 0)
3763 trim_sg = 1;
cedc9a47
JG
3764
3765 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3766 sg->length, qc->pad_len);
3767 }
3768
2e242fa9
TH
3769 if (trim_sg) {
3770 qc->n_elem--;
e1410f2d
JG
3771 goto skip_map;
3772 }
3773
2f1f610b 3774 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 3775 sg->length, dir);
537a95d9
TH
3776 if (dma_mapping_error(dma_address)) {
3777 /* restore sg */
3778 sg->length += qc->pad_len;
1da177e4 3779 return -1;
537a95d9 3780 }
1da177e4
LT
3781
3782 sg_dma_address(sg) = dma_address;
32529e01 3783 sg_dma_len(sg) = sg->length;
1da177e4 3784
2e242fa9 3785skip_map:
1da177e4
LT
3786 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3787 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3788
3789 return 0;
3790}
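/* Worked example (editorial note): for a 1022-byte ATAPI buffer,
 * pad_len = 1022 & 3 = 2.  The sg entry is trimmed to 1020 bytes and
 * the transfer is finished from the 4-byte pad buffer, so the device
 * always sees a transfer ending on a 32-bit boundary
 * (1020 + 4 = 1024 bytes on the wire).
 */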
3791
3792/**
0cba632b
JG
3793 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3794 * @qc: Command with scatter-gather table to be mapped.
3795 *
3796 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3797 *
3798 * LOCKING:
cca3974e 3799 * spin_lock_irqsave(host lock)
1da177e4
LT
3800 *
3801 * RETURNS:
0cba632b 3802 * Zero on success, negative on error.
1da177e4
LT
3803 *
3804 */
3805
3806static int ata_sg_setup(struct ata_queued_cmd *qc)
3807{
3808 struct ata_port *ap = qc->ap;
cedc9a47
JG
3809 struct scatterlist *sg = qc->__sg;
3810 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 3811 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4
LT
3812
3813 VPRINTK("ENTER, ata%u\n", ap->id);
a4631474 3814 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 3815
cedc9a47
JG
3816 /* we must lengthen transfers to end on a 32-bit boundary */
3817 qc->pad_len = lsg->length & 3;
3818 if (qc->pad_len) {
3819 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3820 struct scatterlist *psg = &qc->pad_sgent;
3821 unsigned int offset;
3822
a4631474 3823 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3824
3825 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3826
3827 /*
3828 * psg->page/offset are used to copy to-be-written
3829 * data in this function or read data in ata_sg_clean.
3830 */
3831 offset = lsg->offset + lsg->length - qc->pad_len;
3832 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3833 psg->offset = offset_in_page(offset);
3834
3835 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3836 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3837 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 3838 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3839 }
3840
3841 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3842 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3843 /* trim last sg */
3844 lsg->length -= qc->pad_len;
e1410f2d
JG
3845 if (lsg->length == 0)
3846 trim_sg = 1;
cedc9a47
JG
3847
3848 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3849 qc->n_elem - 1, lsg->length, qc->pad_len);
3850 }
3851
e1410f2d
JG
3852 pre_n_elem = qc->n_elem;
3853 if (trim_sg && pre_n_elem)
3854 pre_n_elem--;
3855
3856 if (!pre_n_elem) {
3857 n_elem = 0;
3858 goto skip_map;
3859 }
3860
1da177e4 3861 dir = qc->dma_dir;
2f1f610b 3862 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
3863 if (n_elem < 1) {
3864 /* restore last sg */
3865 lsg->length += qc->pad_len;
1da177e4 3866 return -1;
537a95d9 3867 }
1da177e4
LT
3868
3869 DPRINTK("%d sg elements mapped\n", n_elem);
3870
e1410f2d 3871skip_map:
1da177e4
LT
3872 qc->n_elem = n_elem;
3873
3874 return 0;
3875}
3876
0baab86b 3877/**
c893a3ae 3878 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3879 * @buf: Buffer to swap
3880 * @buf_words: Number of 16-bit words in buffer.
3881 *
3882 * Swap halves of 16-bit words if needed to convert from
3883 * little-endian byte order to native cpu byte order, or
3884 * vice-versa.
3885 *
3886 * LOCKING:
6f0ef4fa 3887 * Inherited from caller.
0baab86b 3888 */
1da177e4
LT
3889void swap_buf_le16(u16 *buf, unsigned int buf_words)
3890{
3891#ifdef __BIG_ENDIAN
3892 unsigned int i;
3893
3894 for (i = 0; i < buf_words; i++)
3895 buf[i] = le16_to_cpu(buf[i]);
3896#endif /* __BIG_ENDIAN */
3897}
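/* Example (editorial note): a word stored little-endian as bytes
 * {0x34, 0x12} reads as 0x3412 on a big-endian CPU; le16_to_cpu()
 * byte-swaps it to the intended 0x1234.  On little-endian builds the
 * #ifdef empties the loop and the function is a no-op.
 */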
3898
6ae4cfb5
AL
3899/**
3900 * ata_mmio_data_xfer - Transfer data by MMIO
bf717b11 3901 * @adev: device for this I/O
6ae4cfb5
AL
3902 * @buf: data buffer
3903 * @buflen: buffer length
344babaa 3904 * @write_data: 1 to write to the device, 0 to read from it
6ae4cfb5
AL
3905 *
3906 * Transfer data from/to the device data register by MMIO.
3907 *
3908 * LOCKING:
3909 * Inherited from caller.
6ae4cfb5
AL
3910 */
3911
88574551 3912void ata_mmio_data_xfer(struct ata_device *adev, unsigned char *buf,
a6b2c5d4 3913 unsigned int buflen, int write_data)
1da177e4 3914{
a6b2c5d4 3915 struct ata_port *ap = adev->ap;
1da177e4
LT
3916 unsigned int i;
3917 unsigned int words = buflen >> 1;
3918 u16 *buf16 = (u16 *) buf;
3919 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
3920
6ae4cfb5 3921 /* Transfer multiple of 2 bytes */
1da177e4
LT
3922 if (write_data) {
3923 for (i = 0; i < words; i++)
3924 writew(le16_to_cpu(buf16[i]), mmio);
3925 } else {
3926 for (i = 0; i < words; i++)
3927 buf16[i] = cpu_to_le16(readw(mmio));
3928 }
6ae4cfb5
AL
3929
3930 /* Transfer trailing 1 byte, if any. */
3931 if (unlikely(buflen & 0x01)) {
3932 u16 align_buf[1] = { 0 };
3933 unsigned char *trailing_buf = buf + buflen - 1;
3934
3935 if (write_data) {
3936 memcpy(align_buf, trailing_buf, 1);
3937 writew(le16_to_cpu(align_buf[0]), mmio);
3938 } else {
3939 align_buf[0] = cpu_to_le16(readw(mmio));
3940 memcpy(trailing_buf, align_buf, 1);
3941 }
3942 }
1da177e4
LT
3943}
3944
6ae4cfb5
AL
3945/**
3946 * ata_pio_data_xfer - Transfer data by PIO
a6b2c5d4 3947 * @adev: device to target
6ae4cfb5
AL
3948 * @buf: data buffer
3949 * @buflen: buffer length
344babaa 3950 * @write_data: 1 to write to the device, 0 to read from it
6ae4cfb5
AL
3951 *
3952 * Transfer data from/to the device data register by PIO.
3953 *
3954 * LOCKING:
3955 * Inherited from caller.
6ae4cfb5
AL
3956 */
3957
88574551 3958void ata_pio_data_xfer(struct ata_device *adev, unsigned char *buf,
a6b2c5d4 3959 unsigned int buflen, int write_data)
1da177e4 3960{
a6b2c5d4 3961 struct ata_port *ap = adev->ap;
6ae4cfb5 3962 unsigned int words = buflen >> 1;
1da177e4 3963
6ae4cfb5 3964 /* Transfer multiple of 2 bytes */
1da177e4 3965 if (write_data)
6ae4cfb5 3966 outsw(ap->ioaddr.data_addr, buf, words);
1da177e4 3967 else
6ae4cfb5
AL
3968 insw(ap->ioaddr.data_addr, buf, words);
3969
3970 /* Transfer trailing 1 byte, if any. */
3971 if (unlikely(buflen & 0x01)) {
3972 u16 align_buf[1] = { 0 };
3973 unsigned char *trailing_buf = buf + buflen - 1;
3974
3975 if (write_data) {
3976 memcpy(align_buf, trailing_buf, 1);
3977 outw(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
3978 } else {
3979 align_buf[0] = cpu_to_le16(inw(ap->ioaddr.data_addr));
3980 memcpy(trailing_buf, align_buf, 1);
3981 }
3982 }
1da177e4
LT
3983}
3984
75e99585
AC
3985/**
3986 * ata_pio_data_xfer_noirq - Transfer data by PIO
3987 * @adev: device to target
3988 * @buf: data buffer
3989 * @buflen: buffer length
3990 * @write_data: 1 to write to the device, 0 to read from it
3991 *
88574551 3992 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
3993 * transfer with interrupts disabled.
3994 *
3995 * LOCKING:
3996 * Inherited from caller.
3997 */
3998
3999void ata_pio_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4000 unsigned int buflen, int write_data)
4001{
4002 unsigned long flags;
4003 local_irq_save(flags);
4004 ata_pio_data_xfer(adev, buf, buflen, write_data);
4005 local_irq_restore(flags);
4006}
4007
4008
6ae4cfb5
AL
4009/**
4010 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
4011 * @qc: Command in progress
4012 *
4013 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
4014 *
4015 * LOCKING:
4016 * Inherited from caller.
4017 */
4018
1da177e4
LT
4019static void ata_pio_sector(struct ata_queued_cmd *qc)
4020{
4021 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4022 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4023 struct ata_port *ap = qc->ap;
4024 struct page *page;
4025 unsigned int offset;
4026 unsigned char *buf;
4027
4028 if (qc->cursect == (qc->nsect - 1))
14be71f4 4029 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4030
4031 page = sg[qc->cursg].page;
4032 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
4033
4034 /* get the current page and offset */
4035 page = nth_page(page, (offset >> PAGE_SHIFT));
4036 offset %= PAGE_SIZE;
4037
1da177e4
LT
4038 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4039
91b8b313
AL
4040 if (PageHighMem(page)) {
4041 unsigned long flags;
4042
a6b2c5d4 4043 /* FIXME: use a bounce buffer */
91b8b313
AL
4044 local_irq_save(flags);
4045 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4046
91b8b313 4047 /* do the actual data transfer */
a6b2c5d4 4048 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
1da177e4 4049
91b8b313
AL
4050 kunmap_atomic(buf, KM_IRQ0);
4051 local_irq_restore(flags);
4052 } else {
4053 buf = page_address(page);
a6b2c5d4 4054 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
91b8b313 4055 }
1da177e4
LT
4056
4057 qc->cursect++;
4058 qc->cursg_ofs++;
4059
32529e01 4060 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
1da177e4
LT
4061 qc->cursg++;
4062 qc->cursg_ofs = 0;
4063 }
1da177e4 4064}
1da177e4 4065
07f6f7d0
AL
4066/**
4067 * ata_pio_sectors - Transfer one or more 512-byte sectors.
4068 * @qc: Command in progress
4069 *
c81e29b4 4070 * Transfer one or more ATA_SECT_SIZE chunks of data from/to the
07f6f7d0
AL
4071 * ATA device for the DRQ request.
4072 *
4073 * LOCKING:
4074 * Inherited from caller.
4075 */
1da177e4 4076
07f6f7d0
AL
4077static void ata_pio_sectors(struct ata_queued_cmd *qc)
4078{
4079 if (is_multi_taskfile(&qc->tf)) {
4080 /* READ/WRITE MULTIPLE */
4081 unsigned int nsect;
4082
587005de 4083 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4084
07f6f7d0
AL
4085 nsect = min(qc->nsect - qc->cursect, qc->dev->multi_count);
4086 while (nsect--)
4087 ata_pio_sector(qc);
4088 } else
4089 ata_pio_sector(qc);
4090}
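/* Worked example (editorial note): for a READ MULTIPLE command with
 * multi_count == 8, nsect == 30 and cursect == 26, the min() above
 * gives nsect = min(30 - 26, 8) = 4, so the final DRQ block moves
 * only the 4 remaining sectors.
 */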
4091
c71c1857
AL
4092/**
4093 * atapi_send_cdb - Write CDB bytes to hardware
4094 * @ap: Port to which ATAPI device is attached.
4095 * @qc: Taskfile currently active
4096 *
4097 * When the device has indicated its readiness to accept
4098 * a CDB, this function is called. Send the CDB.
4099 *
4100 * LOCKING:
4101 * caller.
4102 */
4103
4104static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4105{
4106 /* send SCSI cdb */
4107 DPRINTK("send cdb\n");
db024d53 4108 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4109
a6b2c5d4 4110 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4111 ata_altstatus(ap); /* flush */
4112
4113 switch (qc->tf.protocol) {
4114 case ATA_PROT_ATAPI:
4115 ap->hsm_task_state = HSM_ST;
4116 break;
4117 case ATA_PROT_ATAPI_NODATA:
4118 ap->hsm_task_state = HSM_ST_LAST;
4119 break;
4120 case ATA_PROT_ATAPI_DMA:
4121 ap->hsm_task_state = HSM_ST_LAST;
4122 /* initiate bmdma */
4123 ap->ops->bmdma_start(qc);
4124 break;
4125 }
1da177e4
LT
4126}
4127
6ae4cfb5
AL
4128/**
4129 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4130 * @qc: Command in progress
4131 * @bytes: number of bytes
4132 *
4133 * Transfer data from/to the ATAPI device.
4134 *
4135 * LOCKING:
4136 * Inherited from caller.
4137 *
4138 */
4139
1da177e4
LT
4140static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4141{
4142 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4143 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4144 struct ata_port *ap = qc->ap;
4145 struct page *page;
4146 unsigned char *buf;
4147 unsigned int offset, count;
4148
563a6e1f 4149 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4150 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4151
4152next_sg:
563a6e1f 4153 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4154 /*
563a6e1f
AL
4155 * The end of qc->sg is reached and the device expects
4156 * more data to transfer. To avoid overrunning qc->sg while
4157 * still satisfying the length given in the byte count register:
4158 * - for the read case, discard trailing data from the device
4159 * - for the write case, pad with zero data to the device
4160 */
4161 u16 pad_buf[1] = { 0 };
4162 unsigned int words = bytes >> 1;
4163 unsigned int i;
4164
4165 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4166 ata_dev_printk(qc->dev, KERN_WARNING,
4167 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4168
4169 for (i = 0; i < words; i++)
a6b2c5d4 4170 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4171
14be71f4 4172 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4173 return;
4174 }
4175
cedc9a47 4176 sg = &qc->__sg[qc->cursg];
1da177e4 4177
1da177e4
LT
4178 page = sg->page;
4179 offset = sg->offset + qc->cursg_ofs;
4180
4181 /* get the current page and offset */
4182 page = nth_page(page, (offset >> PAGE_SHIFT));
4183 offset %= PAGE_SIZE;
4184
6952df03 4185 /* don't overrun current sg */
32529e01 4186 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4187
4188 /* don't cross page boundaries */
4189 count = min(count, (unsigned int)PAGE_SIZE - offset);
4190
7282aa4b
AL
4191 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4192
91b8b313
AL
4193 if (PageHighMem(page)) {
4194 unsigned long flags;
4195
a6b2c5d4 4196 /* FIXME: use bounce buffer */
91b8b313
AL
4197 local_irq_save(flags);
4198 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4199
91b8b313 4200 /* do the actual data transfer */
a6b2c5d4 4201 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4202
91b8b313
AL
4203 kunmap_atomic(buf, KM_IRQ0);
4204 local_irq_restore(flags);
4205 } else {
4206 buf = page_address(page);
a6b2c5d4 4207 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4208 }
1da177e4
LT
4209
4210 bytes -= count;
4211 qc->curbytes += count;
4212 qc->cursg_ofs += count;
4213
32529e01 4214 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4215 qc->cursg++;
4216 qc->cursg_ofs = 0;
4217 }
4218
563a6e1f 4219 if (bytes)
1da177e4 4220 goto next_sg;
1da177e4
LT
4221}
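/* Worked example (editorial note, assuming 4 KiB pages): with 6000
 * bytes requested, a 4096-byte sg entry at cursg_ofs 0, and the
 * mapping 512 bytes into a page, count = min(4096 - 0, 6000) = 4096,
 * then count = min(4096, 4096 - 512) = 3584.  The remaining 2416
 * bytes are handled by further trips through next_sg.
 */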
4222
6ae4cfb5
AL
4223/**
4224 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4225 * @qc: Command in progress
4226 *
4227 * Transfer data from/to the ATAPI device.
4228 *
4229 * LOCKING:
4230 * Inherited from caller.
6ae4cfb5
AL
4231 */
4232
1da177e4
LT
4233static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4234{
4235 struct ata_port *ap = qc->ap;
4236 struct ata_device *dev = qc->dev;
4237 unsigned int ireason, bc_lo, bc_hi, bytes;
4238 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4239
eec4c3f3
AL
4240 /* Abuse qc->result_tf for temp storage of intermediate TF
4241 * here to save some kernel stack usage.
4242 * For normal completion, qc->result_tf is not relevant. For
4243 * error, qc->result_tf is later overwritten by ata_qc_complete().
4244 * So, the correctness of qc->result_tf is not affected.
4245 */
4246 ap->ops->tf_read(ap, &qc->result_tf);
4247 ireason = qc->result_tf.nsect;
4248 bc_lo = qc->result_tf.lbam;
4249 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4250 bytes = (bc_hi << 8) | bc_lo;
4251
4252 /* shall be cleared to zero, indicating xfer of data */
4253 if (ireason & (1 << 0))
4254 goto err_out;
4255
4256 /* make sure transfer direction matches expected */
4257 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4258 if (do_write != i_write)
4259 goto err_out;
4260
312f7da2
AL
4261 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
4262
1da177e4
LT
4263 __atapi_pio_bytes(qc, bytes);
4264
4265 return;
4266
4267err_out:
f15a1daf 4268 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4269 qc->err_mask |= AC_ERR_HSM;
14be71f4 4270 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4271}
4272
4273/**
c234fb00
AL
4274 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4275 * @ap: the target ata_port
4276 * @qc: qc in progress
1da177e4 4277 *
c234fb00
AL
4278 * RETURNS:
4279 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4280 */
c234fb00
AL
4281
4282static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4283{
c234fb00
AL
4284 if (qc->tf.flags & ATA_TFLAG_POLLING)
4285 return 1;
1da177e4 4286
c234fb00
AL
4287 if (ap->hsm_task_state == HSM_ST_FIRST) {
4288 if (qc->tf.protocol == ATA_PROT_PIO &&
4289 (qc->tf.flags & ATA_TFLAG_WRITE))
4290 return 1;
1da177e4 4291
c234fb00
AL
4292 if (is_atapi_taskfile(&qc->tf) &&
4293 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4294 return 1;
fe79e683
AL
4295 }
4296
c234fb00
AL
4297 return 0;
4298}
1da177e4 4299
c17ea20d
TH
4300/**
4301 * ata_hsm_qc_complete - finish a qc running on standard HSM
4302 * @qc: Command to complete
4303 * @in_wq: 1 if called from workqueue, 0 otherwise
4304 *
4305 * Finish @qc which is running on standard HSM.
4306 *
4307 * LOCKING:
cca3974e 4308 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4309 * Otherwise, none on entry and grabs host lock.
4310 */
4311static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4312{
4313 struct ata_port *ap = qc->ap;
4314 unsigned long flags;
4315
4316 if (ap->ops->error_handler) {
4317 if (in_wq) {
ba6a1308 4318 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4319
cca3974e
JG
4320 /* EH might have kicked in while host lock is
4321 * released.
c17ea20d
TH
4322 */
4323 qc = ata_qc_from_tag(ap, qc->tag);
4324 if (qc) {
4325 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
4326 ata_irq_on(ap);
4327 ata_qc_complete(qc);
4328 } else
4329 ata_port_freeze(ap);
4330 }
4331
ba6a1308 4332 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4333 } else {
4334 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4335 ata_qc_complete(qc);
4336 else
4337 ata_port_freeze(ap);
4338 }
4339 } else {
4340 if (in_wq) {
ba6a1308 4341 spin_lock_irqsave(ap->lock, flags);
c17ea20d
TH
4342 ata_irq_on(ap);
4343 ata_qc_complete(qc);
ba6a1308 4344 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4345 } else
4346 ata_qc_complete(qc);
4347 }
1da177e4 4348
c81e29b4 4349 ata_altstatus(ap); /* flush */
c17ea20d
TH
4350}
4351
bb5cb290
AL
4352/**
4353 * ata_hsm_move - move the HSM to the next state.
4354 * @ap: the target ata_port
4355 * @qc: qc in progress
4356 * @status: current device status
4357 * @in_wq: 1 if called from workqueue, 0 otherwise
4358 *
4359 * RETURNS:
4360 * 1 if the next status needs to be polled, 0 otherwise.
4361 */
9a1004d0
TH
4362int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4363 u8 status, int in_wq)
e2cec771 4364{
bb5cb290
AL
4365 unsigned long flags = 0;
4366 int poll_next;
4367
6912ccd5
AL
4368 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4369
bb5cb290
AL
4370 /* Make sure ata_qc_issue_prot() does not throw things
4371 * like DMA polling into the workqueue. Notice that
4372 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4373 */
c234fb00 4374 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4375
e2cec771 4376fsm_start:
999bb6f4
AL
4377 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4378 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4379
e2cec771
AL
4380 switch (ap->hsm_task_state) {
4381 case HSM_ST_FIRST:
bb5cb290
AL
4382 /* Send first data block or PACKET CDB */
4383
4384 /* If polling, we will stay in the work queue after
4385 * sending the data. Otherwise, interrupt handler
4386 * takes over after sending the data.
4387 */
4388 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4389
e2cec771 4390 /* check device status */
3655d1d3
AL
4391 if (unlikely((status & ATA_DRQ) == 0)) {
4392 /* handle BSY=0, DRQ=0 as error */
4393 if (likely(status & (ATA_ERR | ATA_DF)))
4394 /* device stops HSM for abort/error */
4395 qc->err_mask |= AC_ERR_DEV;
4396 else
4397 /* HSM violation. Let EH handle this */
4398 qc->err_mask |= AC_ERR_HSM;
4399
14be71f4 4400 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4401 goto fsm_start;
1da177e4
LT
4402 }
4403
71601958
AL
4404 /* Device should not ask for data transfer (DRQ=1)
4405 * when it finds something wrong.
eee6c32f
AL
4406 * We ignore DRQ here and stop the HSM by
4407 * changing hsm_task_state to HSM_ST_ERR and
4408 * let the EH abort the command or reset the device.
71601958
AL
4409 */
4410 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4411 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4412 ap->id, status);
3655d1d3 4413 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4414 ap->hsm_task_state = HSM_ST_ERR;
4415 goto fsm_start;
71601958 4416 }
1da177e4 4417
bb5cb290
AL
4418 /* Send the CDB (atapi) or the first data block (ata pio out).
4419 * During the state transition, interrupt handler shouldn't
4420 * be invoked before the data transfer is complete and
4421 * hsm_task_state is changed. Hence, the following locking.
4422 */
4423 if (in_wq)
ba6a1308 4424 spin_lock_irqsave(ap->lock, flags);
1da177e4 4425
bb5cb290
AL
4426 if (qc->tf.protocol == ATA_PROT_PIO) {
4427 /* PIO data out protocol.
4428 * send first data block.
4429 */
0565c26d 4430
bb5cb290
AL
4431 /* ata_pio_sectors() might change the state
4432 * to HSM_ST_LAST. so, the state is changed here
4433 * before ata_pio_sectors().
4434 */
4435 ap->hsm_task_state = HSM_ST;
4436 ata_pio_sectors(qc);
4437 ata_altstatus(ap); /* flush */
4438 } else
4439 /* send CDB */
4440 atapi_send_cdb(ap, qc);
4441
4442 if (in_wq)
ba6a1308 4443 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4444
4445 /* if polling, ata_pio_task() handles the rest.
4446 * otherwise, interrupt handler takes over from here.
4447 */
e2cec771 4448 break;
1c848984 4449
e2cec771
AL
4450 case HSM_ST:
4451 /* complete command or read/write the data register */
4452 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4453 /* ATAPI PIO protocol */
4454 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4455 /* No more data to transfer or device error.
4456 * Device error will be tagged in HSM_ST_LAST.
4457 */
e2cec771
AL
4458 ap->hsm_task_state = HSM_ST_LAST;
4459 goto fsm_start;
4460 }
1da177e4 4461
71601958
AL
4462 /* Device should not ask for data transfer (DRQ=1)
4463 * when it finds something wrong.
eee6c32f
AL
4464 * We ignore DRQ here and stop the HSM by
4465 * changing hsm_task_state to HSM_ST_ERR and
4466 * let the EH abort the command or reset the device.
71601958
AL
4467 */
4468 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4469 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4470 ap->id, status);
3655d1d3 4471 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4472 ap->hsm_task_state = HSM_ST_ERR;
4473 goto fsm_start;
71601958 4474 }
1da177e4 4475
e2cec771 4476 atapi_pio_bytes(qc);
7fb6ec28 4477
e2cec771
AL
4478 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4479 /* bad ireason reported by device */
4480 goto fsm_start;
1da177e4 4481
e2cec771
AL
4482 } else {
4483 /* ATA PIO protocol */
4484 if (unlikely((status & ATA_DRQ) == 0)) {
4485 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4486 if (likely(status & (ATA_ERR | ATA_DF)))
4487 /* device stops HSM for abort/error */
4488 qc->err_mask |= AC_ERR_DEV;
4489 else
55a8e2c8
TH
4490 /* HSM violation. Let EH handle this.
4491 * Phantom devices also trigger this
4492 * condition. Mark hint.
4493 */
4494 qc->err_mask |= AC_ERR_HSM |
4495 AC_ERR_NODEV_HINT;
3655d1d3 4496
e2cec771
AL
4497 ap->hsm_task_state = HSM_ST_ERR;
4498 goto fsm_start;
4499 }
1da177e4 4500
eee6c32f
AL
4501 /* For PIO reads, some devices may ask for
4502 * data transfer (DRQ=1) along with ERR=1.
4503 * We respect DRQ here and transfer one
4504 * block of junk data before changing the
4505 * hsm_task_state to HSM_ST_ERR.
4506 *
4507 * For PIO writes, ERR=1 DRQ=1 doesn't make
4508 * sense since the data block has been
4509 * transferred to the device.
71601958
AL
4510 */
4511 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4512 /* data might be corrupted */
4513 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4514
4515 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4516 ata_pio_sectors(qc);
4517 ata_altstatus(ap);
4518 status = ata_wait_idle(ap);
4519 }
4520
3655d1d3
AL
4521 if (status & (ATA_BUSY | ATA_DRQ))
4522 qc->err_mask |= AC_ERR_HSM;
4523
eee6c32f
AL
4524 /* ata_pio_sectors() might change the
4525 * state to HSM_ST_LAST. so, the state
4526 * is changed after ata_pio_sectors().
4527 */
4528 ap->hsm_task_state = HSM_ST_ERR;
4529 goto fsm_start;
71601958
AL
4530 }
4531
e2cec771
AL
4532 ata_pio_sectors(qc);
4533
4534 if (ap->hsm_task_state == HSM_ST_LAST &&
4535 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4536 /* all data read */
4537 ata_altstatus(ap);
52a32205 4538 status = ata_wait_idle(ap);
e2cec771
AL
4539 goto fsm_start;
4540 }
4541 }
4542
4543 ata_altstatus(ap); /* flush */
bb5cb290 4544 poll_next = 1;
1da177e4
LT
4545 break;
4546
14be71f4 4547 case HSM_ST_LAST:
6912ccd5
AL
4548 if (unlikely(!ata_ok(status))) {
4549 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4550 ap->hsm_task_state = HSM_ST_ERR;
4551 goto fsm_start;
4552 }
4553
4554 /* no more data to transfer */
4332a771
AL
4555 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4556 ap->id, qc->dev->devno, status);
e2cec771 4557
6912ccd5
AL
4558 WARN_ON(qc->err_mask);
4559
e2cec771 4560 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4561
e2cec771 4562 /* complete taskfile transaction */
c17ea20d 4563 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4564
4565 poll_next = 0;
1da177e4
LT
4566 break;
4567
14be71f4 4568 case HSM_ST_ERR:
e2cec771
AL
4569 /* make sure qc->err_mask is available to
4570 * know what's wrong and recover
4571 */
4572 WARN_ON(qc->err_mask == 0);
4573
4574 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4575
999bb6f4 4576 /* complete taskfile transaction */
c17ea20d 4577 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4578
4579 poll_next = 0;
e2cec771
AL
4580 break;
4581 default:
bb5cb290 4582 poll_next = 0;
6912ccd5 4583 BUG();
1da177e4
LT
4584 }
4585
bb5cb290 4586 return poll_next;
1da177e4
LT
4587}
4588
65f27f38 4589static void ata_pio_task(struct work_struct *work)
8061f5f0 4590{
65f27f38
DH
4591 struct ata_port *ap =
4592 container_of(work, struct ata_port, port_task.work);
4593 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4594 u8 status;
a1af3734 4595 int poll_next;
8061f5f0 4596
7fb6ec28 4597fsm_start:
a1af3734 4598 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4599
a1af3734
AL
4600 /*
4601 * This is purely heuristic. This is a fast path.
4602 * Sometimes when we enter, BSY will be cleared in
4603 * a chk-status or two. If not, the drive is probably seeking
4604 * or something. Snooze for a couple msecs, then
4605 * chk-status again. If still busy, queue delayed work.
4606 */
4607 status = ata_busy_wait(ap, ATA_BUSY, 5);
4608 if (status & ATA_BUSY) {
4609 msleep(2);
4610 status = ata_busy_wait(ap, ATA_BUSY, 10);
4611 if (status & ATA_BUSY) {
31ce6dae 4612 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4613 return;
4614 }
8061f5f0
TH
4615 }
4616
a1af3734
AL
4617 /* move the HSM */
4618 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4619
a1af3734
AL
4620 /* another command or interrupt handler
4621 * may be running at this point.
4622 */
4623 if (poll_next)
7fb6ec28 4624 goto fsm_start;
8061f5f0
TH
4625}
4626
1da177e4
LT
4627/**
4628 * ata_qc_new - Request an available ATA command, for queueing
4629 * @ap: Port associated with device @dev
4630 * @dev: Device from whom we request an available command structure
4631 *
4632 * LOCKING:
0cba632b 4633 * None.
1da177e4
LT
4634 */
4635
4636static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4637{
4638 struct ata_queued_cmd *qc = NULL;
4639 unsigned int i;
4640
e3180499 4641 /* no command while frozen */
b51e9e5d 4642 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4643 return NULL;
4644
2ab7db1f
TH
4645 /* the last tag is reserved for internal command. */
4646 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4647 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4648 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4649 break;
4650 }
4651
4652 if (qc)
4653 qc->tag = i;
4654
4655 return qc;
4656}
4657
4658/**
4659 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4660 * @dev: Device from whom we request an available command structure
4661 *
4662 * LOCKING:
0cba632b 4663 * None.
1da177e4
LT
4664 */
4665
3373efd8 4666struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4667{
3373efd8 4668 struct ata_port *ap = dev->ap;
1da177e4
LT
4669 struct ata_queued_cmd *qc;
4670
4671 qc = ata_qc_new(ap);
4672 if (qc) {
1da177e4
LT
4673 qc->scsicmd = NULL;
4674 qc->ap = ap;
4675 qc->dev = dev;
1da177e4 4676
2c13b7ce 4677 ata_qc_reinit(qc);
1da177e4
LT
4678 }
4679
4680 return qc;
4681}
4682
1da177e4
LT
4683/**
4684 * ata_qc_free - free unused ata_queued_cmd
4685 * @qc: Command to complete
4686 *
4687 * Designed to free unused ata_queued_cmd object
4688 * in case something prevents using it.
4689 *
4690 * LOCKING:
cca3974e 4691 * spin_lock_irqsave(host lock)
1da177e4
LT
4692 */
4693void ata_qc_free(struct ata_queued_cmd *qc)
4694{
4ba946e9
TH
4695 struct ata_port *ap = qc->ap;
4696 unsigned int tag;
4697
a4631474 4698 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4699
4ba946e9
TH
4700 qc->flags = 0;
4701 tag = qc->tag;
4702 if (likely(ata_tag_valid(tag))) {
4ba946e9 4703 qc->tag = ATA_TAG_POISON;
6cec4a39 4704 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4705 }
1da177e4
LT
4706}
4707
76014427 4708void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4709{
dedaf2b0
TH
4710 struct ata_port *ap = qc->ap;
4711
a4631474
TH
4712 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4713 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4714
4715 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4716 ata_sg_clean(qc);
4717
7401abf2 4718 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4719 if (qc->tf.protocol == ATA_PROT_NCQ)
4720 ap->sactive &= ~(1 << qc->tag);
4721 else
4722 ap->active_tag = ATA_TAG_POISON;
7401abf2 4723
3f3791d3
AL
4724 /* atapi: mark qc as inactive to prevent the interrupt handler
4725 * from completing the command twice later, before the error handler
4726 * is called. (when rc != 0 and atapi request sense is needed)
4727 */
4728 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4729 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4730
1da177e4 4731 /* call completion callback */
77853bf2 4732 qc->complete_fn(qc);
1da177e4
LT
4733}
4734
39599a53
TH
4735static void fill_result_tf(struct ata_queued_cmd *qc)
4736{
4737 struct ata_port *ap = qc->ap;
4738
4739 ap->ops->tf_read(ap, &qc->result_tf);
4740 qc->result_tf.flags = qc->tf.flags;
4741}
4742
f686bcb8
TH
4743/**
4744 * ata_qc_complete - Complete an active ATA command
4745 * @qc: Command to complete
4747 *
4748 * Indicate to the mid and upper layers that an ATA
4749 * command has completed, with either an ok or not-ok status.
4750 *
4751 * LOCKING:
cca3974e 4752 * spin_lock_irqsave(host lock)
f686bcb8
TH
4753 */
4754void ata_qc_complete(struct ata_queued_cmd *qc)
4755{
4756 struct ata_port *ap = qc->ap;
4757
4758 /* XXX: New EH and old EH use different mechanisms to
4759 * synchronize EH with regular execution path.
4760 *
4761 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4762 * Normal execution path is responsible for not accessing a
4763 * failed qc. libata core enforces the rule by returning NULL
4764 * from ata_qc_from_tag() for failed qcs.
4765 *
4766 * Old EH depends on ata_qc_complete() nullifying completion
4767 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4768 * not synchronize with interrupt handler. Only PIO task is
4769 * taken care of.
4770 */
4771 if (ap->ops->error_handler) {
b51e9e5d 4772 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4773
4774 if (unlikely(qc->err_mask))
4775 qc->flags |= ATA_QCFLAG_FAILED;
4776
4777 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4778 if (!ata_tag_internal(qc->tag)) {
4779 /* always fill result TF for failed qc */
39599a53 4780 fill_result_tf(qc);
f686bcb8
TH
4781 ata_qc_schedule_eh(qc);
4782 return;
4783 }
4784 }
4785
4786 /* read result TF if requested */
4787 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4788 fill_result_tf(qc);
f686bcb8
TH
4789
4790 __ata_qc_complete(qc);
4791 } else {
4792 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4793 return;
4794
4795 /* read result TF if failed or requested */
4796 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4797 fill_result_tf(qc);
f686bcb8
TH
4798
4799 __ata_qc_complete(qc);
4800 }
4801}
4802
dedaf2b0
TH
4803/**
4804 * ata_qc_complete_multiple - Complete multiple qcs successfully
4805 * @ap: port in question
4806 * @qc_active: new qc_active mask
4807 * @finish_qc: LLDD callback invoked before completing a qc
4808 *
4809 * Complete in-flight commands. This function is meant to be
4810 * called from the low-level driver's interrupt routine to complete
4811 * requests normally. ap->qc_active and @qc_active are compared
4812 * and commands are completed accordingly.
4813 *
4814 * LOCKING:
cca3974e 4815 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4816 *
4817 * RETURNS:
4818 * Number of completed commands on success, -errno otherwise.
4819 */
4820int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4821 void (*finish_qc)(struct ata_queued_cmd *))
4822{
4823 int nr_done = 0;
4824 u32 done_mask;
4825 int i;
4826
4827 done_mask = ap->qc_active ^ qc_active;
4828
4829 if (unlikely(done_mask & qc_active)) {
4830 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4831 "(%08x->%08x)\n", ap->qc_active, qc_active);
4832 return -EINVAL;
4833 }
4834
4835 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4836 struct ata_queued_cmd *qc;
4837
4838 if (!(done_mask & (1 << i)))
4839 continue;
4840
4841 if ((qc = ata_qc_from_tag(ap, i))) {
4842 if (finish_qc)
4843 finish_qc(qc);
4844 ata_qc_complete(qc);
4845 nr_done++;
4846 }
4847 }
4848
4849 return nr_done;
4850}
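/* Worked example (editorial note): if ap->qc_active == 0x0000000f and
 * the controller reports qc_active == 0x00000005, done_mask ==
 * 0x0000000a and the qcs with tags 1 and 3 are completed.  A set bit
 * in done_mask & qc_active would mean a tag went active that libata
 * never issued -- the illegal transition rejected above.
 */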
4851
1da177e4
LT
4852static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4853{
4854 struct ata_port *ap = qc->ap;
4855
4856 switch (qc->tf.protocol) {
3dc1d881 4857 case ATA_PROT_NCQ:
1da177e4
LT
4858 case ATA_PROT_DMA:
4859 case ATA_PROT_ATAPI_DMA:
4860 return 1;
4861
4862 case ATA_PROT_ATAPI:
4863 case ATA_PROT_PIO:
1da177e4
LT
4864 if (ap->flags & ATA_FLAG_PIO_DMA)
4865 return 1;
4866
4867 /* fall through */
4868
4869 default:
4870 return 0;
4871 }
4872
4873 /* never reached */
4874}
4875
4876/**
4877 * ata_qc_issue - issue taskfile to device
4878 * @qc: command to issue to device
4879 *
4880 * Prepare an ATA command for submission to the device.
4881 * This includes mapping the data into a DMA-able
4882 * area, filling in the S/G table, and finally
4883 * writing the taskfile to hardware, starting the command.
4884 *
4885 * LOCKING:
cca3974e 4886 * spin_lock_irqsave(host lock)
1da177e4 4887 */
8e0e694a 4888void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4889{
4890 struct ata_port *ap = qc->ap;
4891
dedaf2b0
TH
4892 /* Make sure only one non-NCQ command is outstanding. The
4893 * check is skipped for old EH because it reuses active qc to
4894 * request ATAPI sense.
4895 */
4896 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4897
4898 if (qc->tf.protocol == ATA_PROT_NCQ) {
4899 WARN_ON(ap->sactive & (1 << qc->tag));
4900 ap->sactive |= 1 << qc->tag;
4901 } else {
4902 WARN_ON(ap->sactive);
4903 ap->active_tag = qc->tag;
4904 }
4905
e4a70e76 4906 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4907 ap->qc_active |= 1 << qc->tag;
e4a70e76 4908
1da177e4
LT
4909 if (ata_should_dma_map(qc)) {
4910 if (qc->flags & ATA_QCFLAG_SG) {
4911 if (ata_sg_setup(qc))
8e436af9 4912 goto sg_err;
1da177e4
LT
4913 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4914 if (ata_sg_setup_one(qc))
8e436af9 4915 goto sg_err;
1da177e4
LT
4916 }
4917 } else {
4918 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4919 }
4920
4921 ap->ops->qc_prep(qc);
4922
8e0e694a
TH
4923 qc->err_mask |= ap->ops->qc_issue(qc);
4924 if (unlikely(qc->err_mask))
4925 goto err;
4926 return;
1da177e4 4927
8e436af9
TH
4928sg_err:
4929 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4930 qc->err_mask |= AC_ERR_SYSTEM;
4931err:
4932 ata_qc_complete(qc);
1da177e4
LT
4933}
4934
4935/**
4936 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4937 * @qc: command to issue to device
4938 *
4939 * Using various libata functions and hooks, this function
4940 * starts an ATA command. ATA commands are grouped into
4941 * classes called "protocols", and issuing each type of protocol
4942 * is slightly different.
4943 *
0baab86b
EF
4944 * May be used as the qc_issue() entry in ata_port_operations.
4945 *
1da177e4 4946 * LOCKING:
cca3974e 4947 * spin_lock_irqsave(host lock)
1da177e4
LT
4948 *
4949 * RETURNS:
9a3d9eb0 4950 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4951 */
4952
9a3d9eb0 4953unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4954{
4955 struct ata_port *ap = qc->ap;
4956
e50362ec
AL
4957 /* Use polling pio if the LLD doesn't handle
4958 * interrupt driven pio and atapi CDB interrupt.
4959 */
4960 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4961 switch (qc->tf.protocol) {
4962 case ATA_PROT_PIO:
e3472cbe 4963 case ATA_PROT_NODATA:
e50362ec
AL
4964 case ATA_PROT_ATAPI:
4965 case ATA_PROT_ATAPI_NODATA:
4966 qc->tf.flags |= ATA_TFLAG_POLLING;
4967 break;
4968 case ATA_PROT_ATAPI_DMA:
4969 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4970 /* see ata_dma_blacklisted() */
e50362ec
AL
4971 BUG();
4972 break;
4973 default:
4974 break;
4975 }
4976 }
4977
3d3cca37
TH
4978 /* Some controllers show flaky interrupt behavior after
4979 * setting xfer mode. Use polling instead.
4980 */
4981 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4982 qc->tf.feature == SETFEATURES_XFER) &&
4983 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4984 qc->tf.flags |= ATA_TFLAG_POLLING;
4985
312f7da2 4986 /* select the device */
1da177e4
LT
4987 ata_dev_select(ap, qc->dev->devno, 1, 0);
4988
312f7da2 4989 /* start the command */
1da177e4
LT
4990 switch (qc->tf.protocol) {
4991 case ATA_PROT_NODATA:
312f7da2
AL
4992 if (qc->tf.flags & ATA_TFLAG_POLLING)
4993 ata_qc_set_polling(qc);
4994
e5338254 4995 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
4996 ap->hsm_task_state = HSM_ST_LAST;
4997
4998 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4999 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5000
1da177e4
LT
5001 break;
5002
5003 case ATA_PROT_DMA:
587005de 5004 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5005
1da177e4
LT
5006 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5007 ap->ops->bmdma_setup(qc); /* set up bmdma */
5008 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5009 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5010 break;
5011
312f7da2
AL
5012 case ATA_PROT_PIO:
5013 if (qc->tf.flags & ATA_TFLAG_POLLING)
5014 ata_qc_set_polling(qc);
1da177e4 5015
e5338254 5016 ata_tf_to_host(ap, &qc->tf);
312f7da2 5017
54f00389
AL
5018 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5019 /* PIO data out protocol */
5020 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5021 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5022
5023 /* always send first data block using
e27486db 5024 * the ata_pio_task() codepath.
54f00389 5025 */
312f7da2 5026 } else {
54f00389
AL
5027 /* PIO data in protocol */
5028 ap->hsm_task_state = HSM_ST;
5029
5030 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5031 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5032
5033 /* if polling, ata_pio_task() handles the rest.
5034 * otherwise, interrupt handler takes over from here.
5035 */
312f7da2
AL
5036 }
5037
1da177e4
LT
5038 break;
5039
1da177e4 5040 case ATA_PROT_ATAPI:
1da177e4 5041 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5042 if (qc->tf.flags & ATA_TFLAG_POLLING)
5043 ata_qc_set_polling(qc);
5044
e5338254 5045 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5046
312f7da2
AL
5047 ap->hsm_task_state = HSM_ST_FIRST;
5048
5049 /* send cdb by polling if no cdb interrupt */
5050 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5051 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5052 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5053 break;
5054
5055 case ATA_PROT_ATAPI_DMA:
587005de 5056 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5057
1da177e4
LT
5058 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5059 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5060 ap->hsm_task_state = HSM_ST_FIRST;
5061
5062 /* send cdb by polling if no cdb interrupt */
5063 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5064 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5065 break;
5066
5067 default:
5068 WARN_ON(1);
9a3d9eb0 5069 return AC_ERR_SYSTEM;
1da177e4
LT
5070 }
5071
5072 return 0;
5073}
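/* Editorial summary of the dispatch above (not from the original
 * source):
 *
 *	protocol	initial hsm_task_state	first data mover
 *	NODATA		HSM_ST_LAST		irq (or pio task if polling)
 *	DMA		HSM_ST_LAST		irq handler
 *	PIO write	HSM_ST_FIRST		pio task (always)
 *	PIO read	HSM_ST			irq (or pio task if polling)
 *	ATAPI (all)	HSM_ST_FIRST		CDB via irq or pio task
 */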
5074
1da177e4
LT
5075/**
5076 * ata_host_intr - Handle host interrupt for given (port, task)
5077 * @ap: Port on which interrupt arrived (possibly...)
5078 * @qc: Taskfile currently active in engine
5079 *
5080 * Handle host interrupt for given queued command. Currently,
5081 * only DMA interrupts are handled. All other commands are
5082 * handled via polling with interrupts disabled (nIEN bit).
5083 *
5084 * LOCKING:
cca3974e 5085 * spin_lock_irqsave(host lock)
1da177e4
LT
5086 *
5087 * RETURNS:
5088 * One if interrupt was handled, zero if not (shared irq).
5089 */
5090
5091inline unsigned int ata_host_intr (struct ata_port *ap,
5092 struct ata_queued_cmd *qc)
5093{
ea54763f 5094 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5095 u8 status, host_stat = 0;
1da177e4 5096
312f7da2
AL
5097 VPRINTK("ata%u: protocol %d task_state %d\n",
5098 ap->id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5099
312f7da2
AL
5100 /* Check whether we are expecting interrupt in this state */
5101 switch (ap->hsm_task_state) {
5102 case HSM_ST_FIRST:
6912ccd5
AL
5103 /* Some pre-ATAPI-4 devices assert INTRQ
5104 * at this state when ready to receive CDB.
5105 */
1da177e4 5106
312f7da2
AL
5107 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5108 * The flag was turned on only for atapi devices.
5109 * No need to check is_atapi_taskfile(&qc->tf) again.
5110 */
5111 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5112 goto idle_irq;
1da177e4 5113 break;
312f7da2
AL
5114 case HSM_ST_LAST:
5115 if (qc->tf.protocol == ATA_PROT_DMA ||
5116 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5117 /* check status of DMA engine */
5118 host_stat = ap->ops->bmdma_status(ap);
5119 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
5120
5121 /* if it's not our irq... */
5122 if (!(host_stat & ATA_DMA_INTR))
5123 goto idle_irq;
5124
5125 /* before we do anything else, clear DMA-Start bit */
5126 ap->ops->bmdma_stop(qc);
a4f16610
AL
5127
5128 if (unlikely(host_stat & ATA_DMA_ERR)) {
5129 /* error when transferring data to/from memory */
5130 qc->err_mask |= AC_ERR_HOST_BUS;
5131 ap->hsm_task_state = HSM_ST_ERR;
5132 }
312f7da2
AL
5133 }
5134 break;
5135 case HSM_ST:
5136 break;
1da177e4
LT
5137 default:
5138 goto idle_irq;
5139 }
5140
312f7da2
AL
5141 /* check altstatus */
5142 status = ata_altstatus(ap);
5143 if (status & ATA_BUSY)
5144 goto idle_irq;
1da177e4 5145
312f7da2
AL
5146 /* check main status, clearing INTRQ */
5147 status = ata_chk_status(ap);
5148 if (unlikely(status & ATA_BUSY))
5149 goto idle_irq;
1da177e4 5150
312f7da2
AL
5151 /* ack bmdma irq events */
5152 ap->ops->irq_clear(ap);
1da177e4 5153
bb5cb290 5154 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5155
5156 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5157 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5158 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5159
1da177e4
LT
5160 return 1; /* irq handled */
5161
5162idle_irq:
5163 ap->stats.idle_irq++;
5164
5165#ifdef ATA_IRQ_TRAP
5166 if ((ap->stats.idle_irq % 1000) == 0) {
1da177e4 5167 ata_irq_ack(ap, 0); /* debug trap */
f15a1daf 5168 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5169 return 1;
1da177e4
LT
5170 }
5171#endif
5172 return 0; /* irq not handled */
5173}
5174
5175/**
5176 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5177 * @irq: irq line (unused)
cca3974e 5178 * @dev_instance: pointer to our ata_host information structure
1da177e4 5179 *
0cba632b
JG
5180 * Default interrupt handler for PCI IDE devices. Calls
5181 * ata_host_intr() for each port that is not disabled.
5182 *
1da177e4 5183 * LOCKING:
cca3974e 5184 * Obtains host lock during operation.
1da177e4
LT
5185 *
5186 * RETURNS:
0cba632b 5187 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5188 */
5189
7d12e780 5190irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5191{
cca3974e 5192 struct ata_host *host = dev_instance;
1da177e4
LT
5193 unsigned int i;
5194 unsigned int handled = 0;
5195 unsigned long flags;
5196
5197 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5198 spin_lock_irqsave(&host->lock, flags);
1da177e4 5199
cca3974e 5200 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5201 struct ata_port *ap;
5202
cca3974e 5203 ap = host->ports[i];
c1389503 5204 if (ap &&
029f5468 5205 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5206 struct ata_queued_cmd *qc;
5207
5208 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5209 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5210 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5211 handled |= ata_host_intr(ap, qc);
5212 }
5213 }
5214
cca3974e 5215 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5216
5217 return IRQ_RETVAL(handled);
5218}
5219
34bf2170
TH
5220/**
5221 * sata_scr_valid - test whether SCRs are accessible
5222 * @ap: ATA port to test SCR accessibility for
5223 *
5224 * Test whether SCRs are accessible for @ap.
5225 *
5226 * LOCKING:
5227 * None.
5228 *
5229 * RETURNS:
5230 * 1 if SCRs are accessible, 0 otherwise.
5231 */
5232int sata_scr_valid(struct ata_port *ap)
5233{
5234 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5235}
5236
5237/**
5238 * sata_scr_read - read SCR register of the specified port
5239 * @ap: ATA port to read SCR for
5240 * @reg: SCR to read
5241 * @val: Place to store read value
5242 *
5243 * Read SCR register @reg of @ap into *@val. This function is
5244 * guaranteed to succeed if the cable type of the port is SATA
5245 * and the port implements ->scr_read.
5246 *
5247 * LOCKING:
5248 * None.
5249 *
5250 * RETURNS:
5251 * 0 on success, negative errno on failure.
5252 */
5253int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5254{
5255 if (sata_scr_valid(ap)) {
5256 *val = ap->ops->scr_read(ap, reg);
5257 return 0;
5258 }
5259 return -EOPNOTSUPP;
5260}
5261
5262/**
5263 * sata_scr_write - write SCR register of the specified port
5264 * @ap: ATA port to write SCR for
5265 * @reg: SCR to write
5266 * @val: value to write
5267 *
5268 * Write @val to SCR register @reg of @ap. This function is
5269 * guaranteed to succeed if the cable type of the port is SATA
5270 * and the port implements ->scr_read.
5271 *
5272 * LOCKING:
5273 * None.
5274 *
5275 * RETURNS:
5276 * 0 on success, negative errno on failure.
5277 */
5278int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5279{
5280 if (sata_scr_valid(ap)) {
5281 ap->ops->scr_write(ap, reg, val);
5282 return 0;
5283 }
5284 return -EOPNOTSUPP;
5285}
5286
5287/**
5288 * sata_scr_write_flush - write SCR register of the specified port and flush
5289 * @ap: ATA port to write SCR for
5290 * @reg: SCR to write
5291 * @val: value to write
5292 *
5293 * This function is identical to sata_scr_write() except that this
5294 * function performs flush after writing to the register.
5295 *
5296 * LOCKING:
5297 * None.
5298 *
5299 * RETURNS:
5300 * 0 on success, negative errno on failure.
5301 */
5302int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5303{
5304 if (sata_scr_valid(ap)) {
5305 ap->ops->scr_write(ap, reg, val);
5306 ap->ops->scr_read(ap, reg);
5307 return 0;
5308 }
5309 return -EOPNOTSUPP;
5310}
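/* Usage sketch (editorial, hedged): a SATA hardreset path might kick
 * the phy by setting DET = 1 in SControl and flushing the write; the
 * 0x301 value (DET = 1, IPM = 3, SPD preserved) follows what libata's
 * standard hardreset does elsewhere in this tree:
 *
 *	u32 scontrol;
 *	if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
 *		scontrol = (scontrol & 0x0f0) | 0x301;
 *		sata_scr_write_flush(ap, SCR_CONTROL, scontrol);
 *	}
 */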
5311
5312/**
5313 * ata_port_online - test whether the given port is online
5314 * @ap: ATA port to test
5315 *
5316 * Test whether @ap is online. Note that this function returns 0
5317 * if online status of @ap cannot be obtained, so
5318 * ata_port_online(ap) != !ata_port_offline(ap).
5319 *
5320 * LOCKING:
5321 * None.
5322 *
5323 * RETURNS:
5324 * 1 if the port online status is available and online.
5325 */
5326int ata_port_online(struct ata_port *ap)
5327{
5328 u32 sstatus;
5329
5330 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5331 return 1;
5332 return 0;
5333}
5334
5335/**
5336 * ata_port_offline - test whether the given port is offline
5337 * @ap: ATA port to test
5338 *
5339 * Test whether @ap is offline. Note that this function returns
5340 * 0 if offline status of @ap cannot be obtained, so
5341 * ata_port_online(ap) != !ata_port_offline(ap).
5342 *
5343 * LOCKING:
5344 * None.
5345 *
5346 * RETURNS:
5347 * 1 if the port offline status is available and offline.
5348 */
5349int ata_port_offline(struct ata_port *ap)
5350{
5351 u32 sstatus;
5352
5353 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5354 return 1;
5355 return 0;
5356}
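/* Editorial note: the 0xf mask and 0x3 value above select the DET
 * field (bits 3:0) of SStatus; per the SATA spec, DET == 0x3 means
 * device presence detected and phy communication established, i.e.
 * the link is up.
 */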
0baab86b 5357
77b08fb5 5358int ata_flush_cache(struct ata_device *dev)
9b847548 5359{
977e6b9f 5360 unsigned int err_mask;
9b847548
JA
5361 u8 cmd;
5362
5363 if (!ata_try_flush_cache(dev))
5364 return 0;
5365
6fc49adb 5366 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5367 cmd = ATA_CMD_FLUSH_EXT;
5368 else
5369 cmd = ATA_CMD_FLUSH;
5370
977e6b9f
TH
5371 err_mask = ata_do_simple_cmd(dev, cmd);
5372 if (err_mask) {
5373 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5374 return -EIO;
5375 }
5376
5377 return 0;
9b847548
JA
5378}
5379
cca3974e
JG
5380static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5381 unsigned int action, unsigned int ehi_flags,
5382 int wait)
500530f6
TH
5383{
5384 unsigned long flags;
5385 int i, rc;
5386
cca3974e
JG
5387 for (i = 0; i < host->n_ports; i++) {
5388 struct ata_port *ap = host->ports[i];
500530f6
TH
5389
5390 /* Previous resume operation might still be in
5391 * progress. Wait for PM_PENDING to clear.
5392 */
5393 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5394 ata_port_wait_eh(ap);
5395 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5396 }
5397
5398 /* request PM ops to EH */
5399 spin_lock_irqsave(ap->lock, flags);
5400
5401 ap->pm_mesg = mesg;
5402 if (wait) {
5403 rc = 0;
5404 ap->pm_result = &rc;
5405 }
5406
5407 ap->pflags |= ATA_PFLAG_PM_PENDING;
5408 ap->eh_info.action |= action;
5409 ap->eh_info.flags |= ehi_flags;
5410
5411 ata_port_schedule_eh(ap);
5412
5413 spin_unlock_irqrestore(ap->lock, flags);
5414
5415 /* wait and check result */
5416 if (wait) {
5417 ata_port_wait_eh(ap);
5418 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5419 if (rc)
5420 return rc;
5421 }
5422 }
5423
5424 return 0;
5425}
5426
5427/**
cca3974e
JG
5428 * ata_host_suspend - suspend host
5429 * @host: host to suspend
500530f6
TH
5430 * @mesg: PM message
5431 *
cca3974e 5432 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5433 * function requests EH to perform PM operations and waits for EH
5434 * to finish.
5435 *
5436 * LOCKING:
5437 * Kernel thread context (may sleep).
5438 *
5439 * RETURNS:
5440 * 0 on success, -errno on failure.
5441 */
cca3974e 5442int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6
TH
5443{
5444 int i, j, rc;
5445
cca3974e 5446 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
500530f6
TH
5447 if (rc)
5448 goto fail;
5449
5450 /* EH is quiescent now. Fail if we have any ready device.
5451 * This happens if hotplug occurs between completion of device
5452 * suspension and here.
5453 */
cca3974e
JG
5454 for (i = 0; i < host->n_ports; i++) {
5455 struct ata_port *ap = host->ports[i];
500530f6
TH
5456
5457 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5458 struct ata_device *dev = &ap->device[j];
5459
5460 if (ata_dev_ready(dev)) {
5461 ata_port_printk(ap, KERN_WARNING,
5462 "suspend failed, device %d "
5463 "still active\n", dev->devno);
5464 rc = -EBUSY;
5465 goto fail;
5466 }
5467 }
5468 }
5469
cca3974e 5470 host->dev->power.power_state = mesg;
500530f6
TH
5471 return 0;
5472
5473 fail:
cca3974e 5474 ata_host_resume(host);
500530f6
TH
5475 return rc;
5476}
5477
5478/**
cca3974e
JG
5479 * ata_host_resume - resume host
5480 * @host: host to resume
500530f6 5481 *
cca3974e 5482 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5483 * function requests EH to perform PM operations and returns.
5484 * Note that all resume operations are performed in parallel.
5485 *
5486 * LOCKING:
5487 * Kernel thread context (may sleep).
5488 */
cca3974e 5489void ata_host_resume(struct ata_host *host)
500530f6 5490{
cca3974e
JG
5491 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5492 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5493 host->dev->power.power_state = PMSG_ON;
500530f6
TH
5494}
5495
c893a3ae
RD
5496/**
5497 * ata_port_start - Set port up for dma.
5498 * @ap: Port to initialize
5499 *
5500 * Called just after data structures for each port are
5501 * initialized. Allocates space for PRD table.
5502 *
5503 * May be used as the port_start() entry in ata_port_operations.
5504 *
5505 * LOCKING:
5506 * Inherited from caller.
5507 */
5508
int ata_port_start (struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc) {
		dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
		return rc;
	}

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);

	return 0;
}


/**
 * ata_port_stop - Undo ata_port_start()
 * @ap: Port to shut down
 *
 * Frees the PRD table.
 *
 * May be used as the port_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_port_stop (struct ata_port *ap)
{
	struct device *dev = ap->dev;

	dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
	ata_pad_free(ap, dev);
}

void ata_host_stop (struct ata_host *host)
{
	if (host->mmio_base)
		iounmap(host->mmio_base);
}

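/* Illustrative sketch, not part of libata: a taskfile-based LLD can plug
 * the three generic helpers above straight into its port operations.
 * Only the start/stop entries are shown; a real driver fills in the full
 * set of callbacks.  "my_port_ops_fragment" is a hypothetical name.
 */
static const struct ata_port_operations my_port_ops_fragment = {
	/* ...taskfile, bmdma and EH ops elided... */
	.port_start	= ata_port_start,	/* allocates the PRD table */
	.port_stop	= ata_port_stop,	/* frees the PRD table */
	.host_stop	= ata_host_stop,	/* iounmaps host->mmio_base */
};
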
/**
 * ata_dev_init - Initialize an ata_device structure
 * @dev: Device structure to initialize
 *
 * Initialize @dev in preparation for probing.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

/**
 * ata_port_init - Initialize an ata_port structure
 * @ap: Structure to initialize
 * @host: Collection of hosts to which @ap belongs
 * @ent: Probe information provided by low-level driver
 * @port_no: Port number associated with this ata_port
 *
 * Initialize a new ata_port structure.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_init(struct ata_port *ap, struct ata_host *host,
		   const struct ata_probe_ent *ent, unsigned int port_no)
{
	unsigned int i;

	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->id = ata_unique_id++;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = ent->dev;
	ap->port_no = port_no;
	if (port_no == 1 && ent->pinfo2) {
		ap->pio_mask = ent->pinfo2->pio_mask;
		ap->mwdma_mask = ent->pinfo2->mwdma_mask;
		ap->udma_mask = ent->pinfo2->udma_mask;
		ap->flags |= ent->pinfo2->flags;
		ap->ops = ent->pinfo2->port_ops;
	} else {
		ap->pio_mask = ent->pio_mask;
		ap->mwdma_mask = ent->mwdma_mask;
		ap->udma_mask = ent->udma_mask;
		ap->flags |= ent->port_flags;
		ap->ops = ent->port_ops;
	}
	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	/* set cable type */
	ap->cbl = ATA_CBL_NONE;
	if (ap->flags & ATA_FLAG_SATA)
		ap->cbl = ATA_CBL_SATA;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif

	memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
}

/**
 * ata_port_init_shost - Initialize SCSI host associated with ATA port
 * @ap: ATA port to initialize SCSI host for
 * @shost: SCSI host associated with @ap
 *
 * Initialize SCSI host @shost associated with ATA port @ap.
 *
 * LOCKING:
 * Inherited from caller.
 */
static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
{
	ap->scsi_host = shost;

	shost->unique_id = ap->id;
	shost->max_id = 16;
	shost->max_lun = 1;
	shost->max_channel = 1;
	shost->max_cmd_len = 12;
}

/**
 * ata_port_add - Attach low-level ATA driver to system
 * @ent: Information provided by low-level driver
 * @host: Collection of ports to which we add
 * @port_no: Port number associated with this host
 *
 * Attach low-level ATA driver to system.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * New ata_port on success, NULL on error.
 */
static struct ata_port *ata_port_add(const struct ata_probe_ent *ent,
				     struct ata_host *host,
				     unsigned int port_no)
{
	struct Scsi_Host *shost;
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	if (!ent->port_ops->error_handler &&
	    !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
		printk(KERN_ERR "ata%u: no reset mechanism available\n",
		       port_no);
		return NULL;
	}

	shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
	if (!shost)
		return NULL;

	shost->transportt = &ata_scsi_transport_template;

	ap = ata_shost_to_port(shost);

	ata_port_init(ap, host, ent, port_no);
	ata_port_init_shost(ap, shost);

	return ap;
}

/**
 * ata_host_init - Initialize a host struct
 * @host: host to initialize
 * @dev: device host is attached to
 * @flags: host flags
 * @ops: port_ops
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */

void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}

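/* Illustrative sketch, not part of libata: drivers that manage their own
 * ata_host (e.g. SAS transports) initialize it with ata_host_init()
 * instead of going through ata_device_add().  "my_alloc_host" and
 * "my_sas_ops" are hypothetical names for this example.
 */
static struct ata_host *my_alloc_host(struct device *dev)
{
	struct ata_host *host;

	/* room for one port pointer after the host struct */
	host = kzalloc(sizeof(*host) + sizeof(void *), GFP_KERNEL);
	if (!host)
		return NULL;

	ata_host_init(host, dev, ATA_FLAG_SATA, &my_sas_ops);
	return host;
}
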
/**
 * ata_device_add - Register hardware device with ATA and SCSI layers
 * @ent: Probe information describing hardware device to be registered
 *
 * This function processes the information provided in the probe
 * information struct @ent, allocates the necessary ATA and SCSI
 * host information structures, initializes them, and registers
 * everything with requisite kernel subsystems.
 *
 * This function requests irqs, probes the ATA bus, and probes
 * the SCSI bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Number of ports registered.  Zero on error (no ports registered).
 */
int ata_device_add(const struct ata_probe_ent *ent)
{
	unsigned int i;
	struct device *dev = ent->dev;
	struct ata_host *host;
	int rc;

	DPRINTK("ENTER\n");

	if (ent->irq == 0) {
		dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
		return 0;
	}
	/* alloc a container for our list of ATA ports (buses) */
	host = kzalloc(sizeof(struct ata_host) +
		       (ent->n_ports * sizeof(void *)), GFP_KERNEL);
	if (!host)
		return 0;

	ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
	host->n_ports = ent->n_ports;
	host->irq = ent->irq;
	host->irq2 = ent->irq2;
	host->mmio_base = ent->mmio_base;
	host->private_data = ent->private_data;

	/* register each port bound to this device */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;
		unsigned long xfer_mode_mask;
		int irq_line = ent->irq;

		ap = ata_port_add(ent, host, i);
		host->ports[i] = ap;
		if (!ap)
			goto err_out;

		/* dummy? */
		if (ent->dummy_port_mask & (1 << i)) {
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
			ap->ops = &ata_dummy_port_ops;
			continue;
		}

		/* start port */
		rc = ap->ops->port_start(ap);
		if (rc) {
			host->ports[i] = NULL;
			scsi_host_put(ap->scsi_host);
			goto err_out;
		}

		/* Report the secondary IRQ for second channel legacy */
		if (i == 1 && ent->irq2)
			irq_line = ent->irq2;

		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
				 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
				 (ap->pio_mask << ATA_SHIFT_PIO);

		/* print per-port info to dmesg */
		ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%lX "
				"ctl 0x%lX bmdma 0x%lX irq %d\n",
				ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
				ata_mode_string(xfer_mode_mask),
				ap->ioaddr.cmd_addr,
				ap->ioaddr.ctl_addr,
				ap->ioaddr.bmdma_addr,
				irq_line);

		/* freeze port before requesting IRQ */
		ata_eh_freeze_port(ap);
	}

	/* obtain irq, that may be shared between channels */
	rc = request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
			 DRV_NAME, host);
	if (rc) {
		dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
			   ent->irq, rc);
		goto err_out;
	}

	/* do we have a second IRQ for the other channel, eg legacy mode */
	if (ent->irq2) {
		/* We will get weird core code crashes later if this is true
		   so trap it now */
		BUG_ON(ent->irq == ent->irq2);

		rc = request_irq(ent->irq2, ent->port_ops->irq_handler, ent->irq_flags,
				 DRV_NAME, host);
		if (rc) {
			dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
				   ent->irq2, rc);
			goto err_out_free_irq;
		}
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		u32 scontrol;
		int rc;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		rc = scsi_add_host(ap->scsi_host, dev);
		if (rc) {
			ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
			/* FIXME: do something useful here */
			/* FIXME: handle unconditional calls to
			 * scsi_scan_host and ata_host_remove, below,
			 * at the very least
			 */
		}

		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	dev_set_drvdata(dev, host);

	VPRINTK("EXIT, returning %u\n", ent->n_ports);
	return ent->n_ports; /* success */

err_out_free_irq:
	free_irq(ent->irq, host);
err_out:
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		if (ap) {
			ap->ops->port_stop(ap);
			scsi_host_put(ap->scsi_host);
		}
	}

	kfree(host);
	VPRINTK("EXIT, returning 0\n");
	return 0;
}

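/* Illustrative sketch, not part of libata: the tail of a typical LLD
 * ->probe() routine once the controller is mapped, using
 * ata_probe_ent_alloc() (defined below) and ata_device_add().
 * "my_port_info" and "my_init_one_tail" are hypothetical; most PCI
 * drivers let ata_pci_init_one() do the equivalent work.
 */
static int my_init_one_tail(struct device *dev, int irq)
{
	struct ata_probe_ent *probe_ent;
	unsigned int n_ports;

	probe_ent = ata_probe_ent_alloc(dev, &my_port_info);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->n_ports = 1;
	probe_ent->irq = irq;
	probe_ent->irq_flags = IRQF_SHARED;
	/* probe_ent->port[0] ioaddr setup elided */

	n_ports = ata_device_add(probe_ent);	/* 0 means failure */
	kfree(probe_ent);			/* not referenced afterwards */

	return n_ports ? 0 : -ENODEV;
}
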
/**
 * ata_port_detach - Detach ATA port in preparation of device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retries will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 * ata_host_remove - PCI layer callback for device removal
 * @host: ATA host set that was removed
 *
 * Unregister all objects associated with this host set.  Free those
 * objects.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */

void ata_host_remove(struct ata_host *host)
{
	unsigned int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);

	free_irq(host->irq, host);
	if (host->irq2)
		free_irq(host->irq2, host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_release(ap->scsi_host);

		if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
			struct ata_ioports *ioaddr = &ap->ioaddr;

			/* FIXME: Add -ac IDE pci mods to remove these special cases */
			if (ioaddr->cmd_addr == ATA_PRIMARY_CMD)
				release_region(ATA_PRIMARY_CMD, 8);
			else if (ioaddr->cmd_addr == ATA_SECONDARY_CMD)
				release_region(ATA_SECONDARY_CMD, 8);
		}

		scsi_host_put(ap->scsi_host);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);

	kfree(host);
}

/**
 * ata_scsi_release - SCSI layer callback hook for host unload
 * @shost: libata host to be unloaded
 *
 * Performs all duties necessary to shut down a libata port...
 * Kill port kthread, disable port, and release resources.
 *
 * LOCKING:
 * Inherited from SCSI layer.
 *
 * RETURNS:
 * One.
 */

int ata_scsi_release(struct Scsi_Host *shost)
{
	struct ata_port *ap = ata_shost_to_port(shost);

	DPRINTK("ENTER\n");

	ap->ops->port_disable(ap);
	ap->ops->port_stop(ap);

	DPRINTK("EXIT\n");
	return 1;
}

struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = kzalloc(sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}

/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}

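/* Illustrative sketch, not part of libata: filling in an ata_ioports
 * for a legacy-style primary channel.  The control port address is an
 * example value; only cmd_addr must be set before ata_std_ports() is
 * called, since all other register addresses are derived from it.
 */
static void my_setup_ioaddr(struct ata_ioports *ioaddr)
{
	ioaddr->cmd_addr = ATA_PRIMARY_CMD;	/* 0x1f0 on legacy PCs */
	ioaddr->altstatus_addr = 0x3f6;		/* example control port */
	ioaddr->ctl_addr = 0x3f6;
	ata_std_ports(ioaddr);	/* derive per-register offsets from cmd_addr */
}
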

#ifdef CONFIG_PCI

void ata_pci_host_stop (struct ata_host *host)
{
	struct pci_dev *pdev = to_pci_dev(host->dev);

	pci_iounmap(pdev, host->mmio_base);
}

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that
 * hot-unplug or module unload event has occurred.
 * Handle this by unregistering all objects associated
 * with this PCI device.  Free those objects.  Then finally
 * release PCI resources and disable device.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */

void ata_pci_remove_one (struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_remove(host);

	pci_release_regions(pdev);
	pci_disable_device(pdev);
	dev_set_drvdata(dev, NULL);
}

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}

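/* Illustrative sketch, not part of libata: a driver checking whether a
 * channel is enabled via a config-space bit.  The register offset, mask
 * and value below are made-up example numbers; real drivers use the
 * offsets documented for their chipset.
 */
static int my_channel_enabled(struct pci_dev *pdev)
{
	static const struct pci_bits my_enable_bits = {
		0x41,	/* reg: config-space offset (example) */
		1,	/* width: one byte */
		0x80,	/* mask */
		0x80,	/* val: bit set means channel enabled */
	};

	return pci_test_config_bits(pdev, &my_enable_bits);
}
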
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);

	if (mesg.event == PM_EVENT_SUSPEND) {
		pci_disable_device(pdev);
		pci_set_power_state(pdev, PCI_D3hot);
	}
}

void ata_pci_device_do_resume(struct pci_dev *pdev)
{
	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	pci_enable_device(pdev);
	pci_set_master(pdev);
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	ata_pci_device_do_resume(pdev);
	ata_host_resume(host);
	return 0;
}
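
/* Illustrative sketch, not part of libata: a PCI ATA driver that has no
 * controller-specific PM work can use the two helpers above directly as
 * its driver callbacks.  "my_pci_driver", "my_pci_ids" and "my_init_one"
 * are hypothetical names.
 */
static struct pci_driver my_pci_driver = {
	.name		= "my_ata",
	.id_table	= my_pci_ids,
	.probe		= my_init_one,		/* driver-specific probe */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};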
#endif /* CONFIG_PCI */


static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}

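/* Illustrative sketch, not part of libata: ata_ratelimit() gates noisy
 * messages from hot paths such as interrupt handlers, allowing at most
 * one message per HZ/5 jiffies.  "my_report_spurious_irq" is a
 * hypothetical caller.
 */
static void my_report_spurious_irq(struct ata_port *ap)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
}
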
/**
 * ata_wait_register - wait until register value changes
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval_msec: polling interval in milliseconds
 * @timeout_msec: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition.
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval_msec until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}

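/* Illustrative sketch, not part of libata: polling a controller status
 * register until a BSY-style bit clears.  "my_mmio_status" and MY_BUSY
 * are hypothetical; real callers pass their own register and mask.
 * Since ata_wait_register() loops while (reg & mask) == val, passing the
 * busy bit as both mask and val waits for that bit to drop.
 */
static int my_wait_idle(void __iomem *my_mmio_status)
{
	u32 status;

	/* poll every 10ms, give up after 1s */
	status = ata_wait_register(my_mmio_status, MY_BUSY, MY_BUSY, 10, 1000);

	return (status & MY_BUSY) ? -EBUSY : 0;
}
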
/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_port_detach);
EXPORT_SYMBOL_GPL(ata_host_remove);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_port_stop);
EXPORT_SYMBOL_GPL(ata_host_stop);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_mmio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer);
EXPORT_SYMBOL_GPL(ata_pio_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_scsi_release);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_host_stop);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);