]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/ericvh...
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 71static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 72static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 73static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 74
f3187195 75unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
baf4fdfa
ML
88int atapi_passthru16 = 1;
89module_param(atapi_passthru16, int, 0444);
90MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
91
c3c013a2
JG
92int libata_fua = 0;
93module_param_named(fua, libata_fua, int, 0444);
94MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
95
1e999736
AC
96static int ata_ignore_hpa = 0;
97module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
98MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
99
b3a70601
AC
100static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
101module_param_named(dma, libata_dma_mask, int, 0444);
102MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
103
a8601e5f
AM
104static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
105module_param(ata_probe_timeout, int, 0444);
106MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
107
6ebe9d86 108int libata_noacpi = 0;
d7d0dad6 109module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 110MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 111
1da177e4
LT
112MODULE_AUTHOR("Jeff Garzik");
113MODULE_DESCRIPTION("Library module for ATA devices");
114MODULE_LICENSE("GPL");
115MODULE_VERSION(DRV_VERSION);
116
0baab86b 117
1da177e4
LT
118/**
119 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
120 * @tf: Taskfile to convert
1da177e4 121 * @pmp: Port multiplier port
9977126c
TH
122 * @is_cmd: This FIS is for command
123 * @fis: Buffer into which data will output
1da177e4
LT
124 *
125 * Converts a standard ATA taskfile to a Serial ATA
126 * FIS structure (Register - Host to Device).
127 *
128 * LOCKING:
129 * Inherited from caller.
130 */
9977126c 131void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 132{
9977126c
TH
133 fis[0] = 0x27; /* Register - Host to Device FIS */
134 fis[1] = pmp & 0xf; /* Port multiplier number*/
135 if (is_cmd)
136 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
137
1da177e4
LT
138 fis[2] = tf->command;
139 fis[3] = tf->feature;
140
141 fis[4] = tf->lbal;
142 fis[5] = tf->lbam;
143 fis[6] = tf->lbah;
144 fis[7] = tf->device;
145
146 fis[8] = tf->hob_lbal;
147 fis[9] = tf->hob_lbam;
148 fis[10] = tf->hob_lbah;
149 fis[11] = tf->hob_feature;
150
151 fis[12] = tf->nsect;
152 fis[13] = tf->hob_nsect;
153 fis[14] = 0;
154 fis[15] = tf->ctl;
155
156 fis[16] = 0;
157 fis[17] = 0;
158 fis[18] = 0;
159 fis[19] = 0;
160}
161
162/**
163 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
164 * @fis: Buffer from which data will be input
165 * @tf: Taskfile to output
166 *
e12a1be6 167 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
168 *
169 * LOCKING:
170 * Inherited from caller.
171 */
172
057ace5e 173void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
174{
175 tf->command = fis[2]; /* status */
176 tf->feature = fis[3]; /* error */
177
178 tf->lbal = fis[4];
179 tf->lbam = fis[5];
180 tf->lbah = fis[6];
181 tf->device = fis[7];
182
183 tf->hob_lbal = fis[8];
184 tf->hob_lbam = fis[9];
185 tf->hob_lbah = fis[10];
186
187 tf->nsect = fis[12];
188 tf->hob_nsect = fis[13];
189}
190
8cbd6df1
AL
191static const u8 ata_rw_cmds[] = {
192 /* pio multi */
193 ATA_CMD_READ_MULTI,
194 ATA_CMD_WRITE_MULTI,
195 ATA_CMD_READ_MULTI_EXT,
196 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
197 0,
198 0,
199 0,
200 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
201 /* pio */
202 ATA_CMD_PIO_READ,
203 ATA_CMD_PIO_WRITE,
204 ATA_CMD_PIO_READ_EXT,
205 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
206 0,
207 0,
208 0,
209 0,
8cbd6df1
AL
210 /* dma */
211 ATA_CMD_READ,
212 ATA_CMD_WRITE,
213 ATA_CMD_READ_EXT,
9a3dccc4
TH
214 ATA_CMD_WRITE_EXT,
215 0,
216 0,
217 0,
218 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 219};
1da177e4
LT
220
221/**
8cbd6df1 222 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
223 * @tf: command to examine and configure
224 * @dev: device tf belongs to
1da177e4 225 *
2e9edbf8 226 * Examine the device configuration and tf->flags to calculate
8cbd6df1 227 * the proper read/write commands and protocol to use.
1da177e4
LT
228 *
229 * LOCKING:
230 * caller.
231 */
bd056d7e 232static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 233{
9a3dccc4 234 u8 cmd;
1da177e4 235
9a3dccc4 236 int index, fua, lba48, write;
2e9edbf8 237
9a3dccc4 238 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
239 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
240 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 241
8cbd6df1
AL
242 if (dev->flags & ATA_DFLAG_PIO) {
243 tf->protocol = ATA_PROT_PIO;
9a3dccc4 244 index = dev->multi_count ? 0 : 8;
9af5c9c9 245 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
246 /* Unable to use DMA due to host limitation */
247 tf->protocol = ATA_PROT_PIO;
0565c26d 248 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
249 } else {
250 tf->protocol = ATA_PROT_DMA;
9a3dccc4 251 index = 16;
8cbd6df1 252 }
1da177e4 253
9a3dccc4
TH
254 cmd = ata_rw_cmds[index + fua + lba48 + write];
255 if (cmd) {
256 tf->command = cmd;
257 return 0;
258 }
259 return -1;
1da177e4
LT
260}
261
35b649fe
TH
262/**
263 * ata_tf_read_block - Read block address from ATA taskfile
264 * @tf: ATA taskfile of interest
265 * @dev: ATA device @tf belongs to
266 *
267 * LOCKING:
268 * None.
269 *
270 * Read block address from @tf. This function can handle all
271 * three address formats - LBA, LBA48 and CHS. tf->protocol and
272 * flags select the address format to use.
273 *
274 * RETURNS:
275 * Block address read from @tf.
276 */
277u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
278{
279 u64 block = 0;
280
281 if (tf->flags & ATA_TFLAG_LBA) {
282 if (tf->flags & ATA_TFLAG_LBA48) {
283 block |= (u64)tf->hob_lbah << 40;
284 block |= (u64)tf->hob_lbam << 32;
285 block |= tf->hob_lbal << 24;
286 } else
287 block |= (tf->device & 0xf) << 24;
288
289 block |= tf->lbah << 16;
290 block |= tf->lbam << 8;
291 block |= tf->lbal;
292 } else {
293 u32 cyl, head, sect;
294
295 cyl = tf->lbam | (tf->lbah << 8);
296 head = tf->device & 0xf;
297 sect = tf->lbal;
298
299 block = (cyl * dev->heads + head) * dev->sectors + sect;
300 }
301
302 return block;
303}
304
bd056d7e
TH
305/**
306 * ata_build_rw_tf - Build ATA taskfile for given read/write request
307 * @tf: Target ATA taskfile
308 * @dev: ATA device @tf belongs to
309 * @block: Block address
310 * @n_block: Number of blocks
311 * @tf_flags: RW/FUA etc...
312 * @tag: tag
313 *
314 * LOCKING:
315 * None.
316 *
317 * Build ATA taskfile @tf for read/write request described by
318 * @block, @n_block, @tf_flags and @tag on @dev.
319 *
320 * RETURNS:
321 *
322 * 0 on success, -ERANGE if the request is too large for @dev,
323 * -EINVAL if the request is invalid.
324 */
325int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
326 u64 block, u32 n_block, unsigned int tf_flags,
327 unsigned int tag)
328{
329 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
330 tf->flags |= tf_flags;
331
6d1245bf 332 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
333 /* yay, NCQ */
334 if (!lba_48_ok(block, n_block))
335 return -ERANGE;
336
337 tf->protocol = ATA_PROT_NCQ;
338 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
339
340 if (tf->flags & ATA_TFLAG_WRITE)
341 tf->command = ATA_CMD_FPDMA_WRITE;
342 else
343 tf->command = ATA_CMD_FPDMA_READ;
344
345 tf->nsect = tag << 3;
346 tf->hob_feature = (n_block >> 8) & 0xff;
347 tf->feature = n_block & 0xff;
348
349 tf->hob_lbah = (block >> 40) & 0xff;
350 tf->hob_lbam = (block >> 32) & 0xff;
351 tf->hob_lbal = (block >> 24) & 0xff;
352 tf->lbah = (block >> 16) & 0xff;
353 tf->lbam = (block >> 8) & 0xff;
354 tf->lbal = block & 0xff;
355
356 tf->device = 1 << 6;
357 if (tf->flags & ATA_TFLAG_FUA)
358 tf->device |= 1 << 7;
359 } else if (dev->flags & ATA_DFLAG_LBA) {
360 tf->flags |= ATA_TFLAG_LBA;
361
362 if (lba_28_ok(block, n_block)) {
363 /* use LBA28 */
364 tf->device |= (block >> 24) & 0xf;
365 } else if (lba_48_ok(block, n_block)) {
366 if (!(dev->flags & ATA_DFLAG_LBA48))
367 return -ERANGE;
368
369 /* use LBA48 */
370 tf->flags |= ATA_TFLAG_LBA48;
371
372 tf->hob_nsect = (n_block >> 8) & 0xff;
373
374 tf->hob_lbah = (block >> 40) & 0xff;
375 tf->hob_lbam = (block >> 32) & 0xff;
376 tf->hob_lbal = (block >> 24) & 0xff;
377 } else
378 /* request too large even for LBA48 */
379 return -ERANGE;
380
381 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
382 return -EINVAL;
383
384 tf->nsect = n_block & 0xff;
385
386 tf->lbah = (block >> 16) & 0xff;
387 tf->lbam = (block >> 8) & 0xff;
388 tf->lbal = block & 0xff;
389
390 tf->device |= ATA_LBA;
391 } else {
392 /* CHS */
393 u32 sect, head, cyl, track;
394
395 /* The request -may- be too large for CHS addressing. */
396 if (!lba_28_ok(block, n_block))
397 return -ERANGE;
398
399 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
400 return -EINVAL;
401
402 /* Convert LBA to CHS */
403 track = (u32)block / dev->sectors;
404 cyl = track / dev->heads;
405 head = track % dev->heads;
406 sect = (u32)block % dev->sectors + 1;
407
408 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
409 (u32)block, track, cyl, head, sect);
410
411 /* Check whether the converted CHS can fit.
412 Cylinder: 0-65535
413 Head: 0-15
414 Sector: 1-255*/
415 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
416 return -ERANGE;
417
418 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
419 tf->lbal = sect;
420 tf->lbam = cyl;
421 tf->lbah = cyl >> 8;
422 tf->device |= head;
423 }
424
425 return 0;
426}
427
cb95d562
TH
428/**
429 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
430 * @pio_mask: pio_mask
431 * @mwdma_mask: mwdma_mask
432 * @udma_mask: udma_mask
433 *
434 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
435 * unsigned int xfer_mask.
436 *
437 * LOCKING:
438 * None.
439 *
440 * RETURNS:
441 * Packed xfer_mask.
442 */
443static unsigned int ata_pack_xfermask(unsigned int pio_mask,
444 unsigned int mwdma_mask,
445 unsigned int udma_mask)
446{
447 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
448 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
449 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
450}
451
c0489e4e
TH
452/**
453 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
454 * @xfer_mask: xfer_mask to unpack
455 * @pio_mask: resulting pio_mask
456 * @mwdma_mask: resulting mwdma_mask
457 * @udma_mask: resulting udma_mask
458 *
459 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
460 * Any NULL distination masks will be ignored.
461 */
462static void ata_unpack_xfermask(unsigned int xfer_mask,
463 unsigned int *pio_mask,
464 unsigned int *mwdma_mask,
465 unsigned int *udma_mask)
466{
467 if (pio_mask)
468 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
469 if (mwdma_mask)
470 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
471 if (udma_mask)
472 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
473}
474
cb95d562 475static const struct ata_xfer_ent {
be9a50c8 476 int shift, bits;
cb95d562
TH
477 u8 base;
478} ata_xfer_tbl[] = {
479 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
480 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
481 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
482 { -1, },
483};
484
485/**
486 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
487 * @xfer_mask: xfer_mask of interest
488 *
489 * Return matching XFER_* value for @xfer_mask. Only the highest
490 * bit of @xfer_mask is considered.
491 *
492 * LOCKING:
493 * None.
494 *
495 * RETURNS:
496 * Matching XFER_* value, 0 if no match found.
497 */
498static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
499{
500 int highbit = fls(xfer_mask) - 1;
501 const struct ata_xfer_ent *ent;
502
503 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
504 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
505 return ent->base + highbit - ent->shift;
506 return 0;
507}
508
509/**
510 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
511 * @xfer_mode: XFER_* of interest
512 *
513 * Return matching xfer_mask for @xfer_mode.
514 *
515 * LOCKING:
516 * None.
517 *
518 * RETURNS:
519 * Matching xfer_mask, 0 if no match found.
520 */
521static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
522{
523 const struct ata_xfer_ent *ent;
524
525 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
526 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
527 return 1 << (ent->shift + xfer_mode - ent->base);
528 return 0;
529}
530
531/**
532 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
533 * @xfer_mode: XFER_* of interest
534 *
535 * Return matching xfer_shift for @xfer_mode.
536 *
537 * LOCKING:
538 * None.
539 *
540 * RETURNS:
541 * Matching xfer_shift, -1 if no match found.
542 */
543static int ata_xfer_mode2shift(unsigned int xfer_mode)
544{
545 const struct ata_xfer_ent *ent;
546
547 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
548 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
549 return ent->shift;
550 return -1;
551}
552
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* ordered by bit position: PIO, then MWDMA, then UDMA */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
598
/* Map a SATA link speed number (1-based) to a human-readable string. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;

	/* spd == 0 wraps to UINT_MAX here, so one check covers both ends */
	if (idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[idx];
}
610
3373efd8 611void ata_dev_disable(struct ata_device *dev)
0b8efb0a 612{
09d7f9b0 613 if (ata_dev_enabled(dev)) {
9af5c9c9 614 if (ata_msg_drv(dev->link->ap))
09d7f9b0 615 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
616 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
617 ATA_DNXFER_QUIET);
0b8efb0a
TH
618 dev->class++;
619 }
620}
621
1da177e4 622/**
0d5ff566 623 * ata_devchk - PATA device presence detection
1da177e4
LT
624 * @ap: ATA channel to examine
625 * @device: Device to examine (starting at zero)
626 *
627 * This technique was originally described in
628 * Hale Landis's ATADRVR (www.ata-atapi.com), and
629 * later found its way into the ATA/ATAPI spec.
630 *
631 * Write a pattern to the ATA shadow registers,
632 * and if a device is present, it will respond by
633 * correctly storing and echoing back the
634 * ATA shadow register contents.
635 *
636 * LOCKING:
637 * caller.
638 */
639
0d5ff566 640static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
641{
642 struct ata_ioports *ioaddr = &ap->ioaddr;
643 u8 nsect, lbal;
644
645 ap->ops->dev_select(ap, device);
646
0d5ff566
TH
647 iowrite8(0x55, ioaddr->nsect_addr);
648 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 649
0d5ff566
TH
650 iowrite8(0xaa, ioaddr->nsect_addr);
651 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 652
0d5ff566
TH
653 iowrite8(0x55, ioaddr->nsect_addr);
654 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 655
0d5ff566
TH
656 nsect = ioread8(ioaddr->nsect_addr);
657 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
658
659 if ((nsect == 0x55) && (lbal == 0xaa))
660 return 1; /* we found a device */
661
662 return 0; /* nothing found */
663}
664
1da177e4
LT
665/**
666 * ata_dev_classify - determine device type based on ATA-spec signature
667 * @tf: ATA taskfile register set for device to be identified
668 *
669 * Determine from taskfile register contents whether a device is
670 * ATA or ATAPI, as per "Signature and persistence" section
671 * of ATA/PI spec (volume 1, sect 5.14).
672 *
673 * LOCKING:
674 * None.
675 *
676 * RETURNS:
633273a3
TH
677 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
678 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 679 */
057ace5e 680unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
681{
682 /* Apple's open source Darwin code hints that some devices only
683 * put a proper signature into the LBA mid/high registers,
684 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
685 *
686 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
687 * signatures for ATA and ATAPI devices attached on SerialATA,
688 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
689 * spec has never mentioned about using different signatures
690 * for ATA/ATAPI devices. Then, Serial ATA II: Port
691 * Multiplier specification began to use 0x69/0x96 to identify
692 * port multpliers and 0x3c/0xc3 to identify SEMB device.
693 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
694 * 0x69/0x96 shortly and described them as reserved for
695 * SerialATA.
696 *
697 * We follow the current spec and consider that 0x69/0x96
698 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 699 */
633273a3 700 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
701 DPRINTK("found ATA device by sig\n");
702 return ATA_DEV_ATA;
703 }
704
633273a3 705 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
706 DPRINTK("found ATAPI device by sig\n");
707 return ATA_DEV_ATAPI;
708 }
709
633273a3
TH
710 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
711 DPRINTK("found PMP device by sig\n");
712 return ATA_DEV_PMP;
713 }
714
715 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
716 printk("ata: SEMB device ignored\n");
717 return ATA_DEV_SEMB_UNSUP; /* not yet */
718 }
719
1da177e4
LT
720 DPRINTK("unknown device\n");
721 return ATA_DEV_UNKNOWN;
722}
723
724/**
725 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
726 * @dev: ATA device to classify (starting at zero)
727 * @present: device seems present
b4dc7623 728 * @r_err: Value of error register on completion
1da177e4
LT
729 *
730 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
731 * an ATA/ATAPI-defined set of values is placed in the ATA
732 * shadow registers, indicating the results of device detection
733 * and diagnostics.
734 *
735 * Select the ATA device, and read the values from the ATA shadow
736 * registers. Then parse according to the Error register value,
737 * and the spec-defined values examined by ata_dev_classify().
738 *
739 * LOCKING:
740 * caller.
b4dc7623
TH
741 *
742 * RETURNS:
743 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 744 */
3f19859e
TH
745unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
746 u8 *r_err)
1da177e4 747{
3f19859e 748 struct ata_port *ap = dev->link->ap;
1da177e4
LT
749 struct ata_taskfile tf;
750 unsigned int class;
751 u8 err;
752
3f19859e 753 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
754
755 memset(&tf, 0, sizeof(tf));
756
1da177e4 757 ap->ops->tf_read(ap, &tf);
0169e284 758 err = tf.feature;
b4dc7623
TH
759 if (r_err)
760 *r_err = err;
1da177e4 761
93590859 762 /* see if device passed diags: if master then continue and warn later */
3f19859e 763 if (err == 0 && dev->devno == 0)
93590859 764 /* diagnostic fail : do nothing _YET_ */
3f19859e 765 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 766 else if (err == 1)
1da177e4 767 /* do nothing */ ;
3f19859e 768 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
769 /* do nothing */ ;
770 else
b4dc7623 771 return ATA_DEV_NONE;
1da177e4 772
b4dc7623 773 /* determine if device is ATA or ATAPI */
1da177e4 774 class = ata_dev_classify(&tf);
b4dc7623 775
d7fbee05
TH
776 if (class == ATA_DEV_UNKNOWN) {
777 /* If the device failed diagnostic, it's likely to
778 * have reported incorrect device signature too.
779 * Assume ATA device if the device seems present but
780 * device signature is invalid with diagnostic
781 * failure.
782 */
783 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
784 class = ATA_DEV_ATA;
785 else
786 class = ATA_DEV_NONE;
787 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
788 class = ATA_DEV_NONE;
789
b4dc7623 790 return class;
1da177e4
LT
791}
792
793/**
6a62a04d 794 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
795 * @id: IDENTIFY DEVICE results we will examine
796 * @s: string into which data is output
797 * @ofs: offset into identify device page
798 * @len: length of string to return. must be an even number.
799 *
800 * The strings in the IDENTIFY DEVICE page are broken up into
801 * 16-bit chunks. Run through the string, and output each
802 * 8-bit chunk linearly, regardless of platform.
803 *
804 * LOCKING:
805 * caller.
806 */
807
6a62a04d
TH
808void ata_id_string(const u16 *id, unsigned char *s,
809 unsigned int ofs, unsigned int len)
1da177e4
LT
810{
811 unsigned int c;
812
813 while (len > 0) {
814 c = id[ofs] >> 8;
815 *s = c;
816 s++;
817
818 c = id[ofs] & 0xff;
819 *s = c;
820 s++;
821
822 ofs++;
823 len -= 2;
824 }
825}
826
0e949ff3 827/**
6a62a04d 828 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
829 * @id: IDENTIFY DEVICE results we will examine
830 * @s: string into which data is output
831 * @ofs: offset into identify device page
832 * @len: length of string to return. must be an odd number.
833 *
6a62a04d 834 * This function is identical to ata_id_string except that it
0e949ff3
TH
835 * trims trailing spaces and terminates the resulting string with
836 * null. @len must be actual maximum length (even number) + 1.
837 *
838 * LOCKING:
839 * caller.
840 */
6a62a04d
TH
841void ata_id_c_string(const u16 *id, unsigned char *s,
842 unsigned int ofs, unsigned int len)
0e949ff3
TH
843{
844 unsigned char *p;
845
846 WARN_ON(!(len & 1));
847
6a62a04d 848 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
849
850 p = s + strnlen(s, len - 1);
851 while (p > s && p[-1] == ' ')
852 p--;
853 *p = '\0';
854}
0baab86b 855
db6f8759
TH
856static u64 ata_id_n_sectors(const u16 *id)
857{
858 if (ata_id_has_lba(id)) {
859 if (ata_id_has_lba48(id))
860 return ata_id_u64(id, 100);
861 else
862 return ata_id_u32(id, 60);
863 } else {
864 if (ata_id_current_chs_valid(id))
865 return ata_id_u32(id, 57);
866 else
867 return id[1] * id[3] * id[6];
868 }
869}
870
1e999736
AC
871static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
872{
873 u64 sectors = 0;
874
875 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
876 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
877 sectors |= (tf->hob_lbal & 0xff) << 24;
878 sectors |= (tf->lbah & 0xff) << 16;
879 sectors |= (tf->lbam & 0xff) << 8;
880 sectors |= (tf->lbal & 0xff);
881
882 return ++sectors;
883}
884
885static u64 ata_tf_to_lba(struct ata_taskfile *tf)
886{
887 u64 sectors = 0;
888
889 sectors |= (tf->device & 0x0f) << 24;
890 sectors |= (tf->lbah & 0xff) << 16;
891 sectors |= (tf->lbam & 0xff) << 8;
892 sectors |= (tf->lbal & 0xff);
893
894 return ++sectors;
895}
896
897/**
c728a914
TH
898 * ata_read_native_max_address - Read native max address
899 * @dev: target device
900 * @max_sectors: out parameter for the result native max address
1e999736 901 *
c728a914
TH
902 * Perform an LBA48 or LBA28 native size query upon the device in
903 * question.
1e999736 904 *
c728a914
TH
905 * RETURNS:
906 * 0 on success, -EACCES if command is aborted by the drive.
907 * -EIO on other errors.
1e999736 908 */
c728a914 909static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 910{
c728a914 911 unsigned int err_mask;
1e999736 912 struct ata_taskfile tf;
c728a914 913 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
914
915 ata_tf_init(dev, &tf);
916
c728a914 917 /* always clear all address registers */
1e999736 918 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 919
c728a914
TH
920 if (lba48) {
921 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
922 tf.flags |= ATA_TFLAG_LBA48;
923 } else
924 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 925
1e999736 926 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
927 tf.device |= ATA_LBA;
928
2b789108 929 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
930 if (err_mask) {
931 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
932 "max address (err_mask=0x%x)\n", err_mask);
933 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
934 return -EACCES;
935 return -EIO;
936 }
1e999736 937
c728a914
TH
938 if (lba48)
939 *max_sectors = ata_tf_to_lba48(&tf);
940 else
941 *max_sectors = ata_tf_to_lba(&tf);
93328e11
AC
942 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
943 (*max_sectors)--;
c728a914 944 return 0;
1e999736
AC
945}
946
947/**
c728a914
TH
948 * ata_set_max_sectors - Set max sectors
949 * @dev: target device
6b38d1d1 950 * @new_sectors: new max sectors value to set for the device
1e999736 951 *
c728a914
TH
952 * Set max sectors of @dev to @new_sectors.
953 *
954 * RETURNS:
955 * 0 on success, -EACCES if command is aborted or denied (due to
956 * previous non-volatile SET_MAX) by the drive. -EIO on other
957 * errors.
1e999736 958 */
05027adc 959static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 960{
c728a914 961 unsigned int err_mask;
1e999736 962 struct ata_taskfile tf;
c728a914 963 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
964
965 new_sectors--;
966
967 ata_tf_init(dev, &tf);
968
1e999736 969 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
970
971 if (lba48) {
972 tf.command = ATA_CMD_SET_MAX_EXT;
973 tf.flags |= ATA_TFLAG_LBA48;
974
975 tf.hob_lbal = (new_sectors >> 24) & 0xff;
976 tf.hob_lbam = (new_sectors >> 32) & 0xff;
977 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 978 } else {
c728a914
TH
979 tf.command = ATA_CMD_SET_MAX;
980
1e582ba4
TH
981 tf.device |= (new_sectors >> 24) & 0xf;
982 }
983
1e999736 984 tf.protocol |= ATA_PROT_NODATA;
c728a914 985 tf.device |= ATA_LBA;
1e999736
AC
986
987 tf.lbal = (new_sectors >> 0) & 0xff;
988 tf.lbam = (new_sectors >> 8) & 0xff;
989 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 990
2b789108 991 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
992 if (err_mask) {
993 ata_dev_printk(dev, KERN_WARNING, "failed to set "
994 "max address (err_mask=0x%x)\n", err_mask);
995 if (err_mask == AC_ERR_DEV &&
996 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
997 return -EACCES;
998 return -EIO;
999 }
1000
c728a914 1001 return 0;
1e999736
AC
1002}
1003
1004/**
1005 * ata_hpa_resize - Resize a device with an HPA set
1006 * @dev: Device to resize
1007 *
1008 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1009 * it if required to the full size of the media. The caller must check
1010 * the drive has the HPA feature set enabled.
05027adc
TH
1011 *
1012 * RETURNS:
1013 * 0 on success, -errno on failure.
1e999736 1014 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	/* current (possibly HPA-clipped) capacity from IDENTIFY data */
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? Only ATA disks with LBA and an enabled
	 * HPA are eligible; devices already flagged broken are skipped.
	 */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  Either there is no hidden area or the user
	 * did not ask for the HPA to be ignored/unlocked.
	 */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data; the capacity words changed after the
	 * SET MAX ADDRESS above.
	 */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1099
10305f0f
AC
1100/**
1101 * ata_id_to_dma_mode - Identify DMA mode from id block
1102 * @dev: device to identify
cc261267 1103 * @unknown: mode to assume if we cannot tell
10305f0f
AC
1104 *
1105 * Set up the timing values for the device based upon the identify
1106 * reported values for the DMA mode. This function is used by drivers
1107 * which rely upon firmware configured modes, but wish to report the
1108 * mode correctly when possible.
1109 *
1110 * In addition we emit similarly formatted messages to the default
1111 * ata_dev_set_mode handler, in order to provide consistency of
1112 * presentation.
1113 */
1114
1115void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1116{
1117 unsigned int mask;
1118 u8 mode;
1119
1120 /* Pack the DMA modes */
1121 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1122 if (dev->id[53] & 0x04)
1123 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1124
1125 /* Select the mode in use */
1126 mode = ata_xfer_mask2mode(mask);
1127
1128 if (mode != 0) {
1129 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1130 ata_mode_string(mask));
1131 } else {
1132 /* SWDMA perhaps ? */
1133 mode = unknown;
1134 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1135 }
1136
1137 /* Configure the device reporting */
1138 dev->xfer_mode = mode;
1139 dev->xfer_shift = ata_xfer_mode2shift(mode);
1140}
1141
0baab86b
EF
1142/**
1143 * ata_noop_dev_select - Select device 0/1 on ATA bus
1144 * @ap: ATA channel to manipulate
1145 * @device: ATA device (numbered from zero) to select
1146 *
1147 * This function performs no actual function.
1148 *
1149 * May be used as the dev_select() entry in ata_port_operations.
1150 *
1151 * LOCKING:
1152 * caller.
1153 */
1da177e4
LT
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* intentionally empty - placeholder dev_select for controllers
	 * that need no device selection
	 */
}
1157
0baab86b 1158
1da177e4
LT
1159/**
1160 * ata_std_dev_select - Select device 0/1 on ATA bus
1161 * @ap: ATA channel to manipulate
1162 * @device: ATA device (numbered from zero) to select
1163 *
1164 * Use the method defined in the ATA specification to
1165 * make either device 0, or device 1, active on the
0baab86b
EF
1166 * ATA channel. Works with both PIO and MMIO.
1167 *
1168 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1169 *
1170 * LOCKING:
1171 * caller.
1172 */
1173
1174void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1175{
1176 u8 tmp;
1177
1178 if (device == 0)
1179 tmp = ATA_DEVICE_OBS;
1180 else
1181 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1182
0d5ff566 1183 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1184 ata_pause(ap); /* needed; also flushes, for mmio */
1185}
1186
1187/**
1188 * ata_dev_select - Select device 0/1 on ATA bus
1189 * @ap: ATA channel to manipulate
1190 * @device: ATA device (numbered from zero) to select
1191 * @wait: non-zero to wait for Status register BSY bit to clear
1192 * @can_sleep: non-zero if context allows sleeping
1193 *
1194 * Use the method defined in the ATA specification to
1195 * make either device 0, or device 1, active on the
1196 * ATA channel.
1197 *
1198 * This is a high-level version of ata_std_dev_select(),
1199 * which additionally provides the services of inserting
1200 * the proper pauses and status polling, where needed.
1201 *
1202 * LOCKING:
1203 * caller.
1204 */
1205
1206void ata_dev_select(struct ata_port *ap, unsigned int device,
1207 unsigned int wait, unsigned int can_sleep)
1208{
88574551 1209 if (ata_msg_probe(ap))
44877b4e
TH
1210 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1211 "device %u, wait %u\n", device, wait);
1da177e4
LT
1212
1213 if (wait)
1214 ata_wait_idle(ap);
1215
1216 ap->ops->dev_select(ap, device);
1217
1218 if (wait) {
9af5c9c9 1219 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1220 msleep(150);
1221 ata_wait_idle(ap);
1222 }
1223}
1224
1225/**
1226 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1227 * @id: IDENTIFY DEVICE page to dump
1da177e4 1228 *
0bd3300a
TH
1229 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1230 * page.
1da177e4
LT
1231 *
1232 * LOCKING:
1233 * caller.
1234 */
1235
0bd3300a 1236static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1237{
1238 DPRINTK("49==0x%04x "
1239 "53==0x%04x "
1240 "63==0x%04x "
1241 "64==0x%04x "
1242 "75==0x%04x \n",
0bd3300a
TH
1243 id[49],
1244 id[53],
1245 id[63],
1246 id[64],
1247 id[75]);
1da177e4
LT
1248 DPRINTK("80==0x%04x "
1249 "81==0x%04x "
1250 "82==0x%04x "
1251 "83==0x%04x "
1252 "84==0x%04x \n",
0bd3300a
TH
1253 id[80],
1254 id[81],
1255 id[82],
1256 id[83],
1257 id[84]);
1da177e4
LT
1258 DPRINTK("88==0x%04x "
1259 "93==0x%04x\n",
0bd3300a
TH
1260 id[88],
1261 id[93]);
1da177e4
LT
1262}
1263
cb95d562
TH
1264/**
1265 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1266 * @id: IDENTIFY data to compute xfer mask from
1267 *
1268 * Compute the xfermask for this device. This is not as trivial
1269 * as it seems if we must consider early devices correctly.
1270 *
1271 * FIXME: pre IDE drive timing (do we care ?).
1272 *
1273 * LOCKING:
1274 * None.
1275 *
1276 * RETURNS:
1277 * Computed xfermask
1278 */
1279static unsigned int ata_id_xfermask(const u16 *id)
1280{
1281 unsigned int pio_mask, mwdma_mask, udma_mask;
1282
1283 /* Usual case. Word 53 indicates word 64 is valid */
1284 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1285 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1286 pio_mask <<= 3;
1287 pio_mask |= 0x7;
1288 } else {
1289 /* If word 64 isn't valid then Word 51 high byte holds
1290 * the PIO timing number for the maximum. Turn it into
1291 * a mask.
1292 */
7a0f1c8a 1293 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1294 if (mode < 5) /* Valid PIO range */
1295 pio_mask = (2 << mode) - 1;
1296 else
1297 pio_mask = 1;
cb95d562
TH
1298
1299 /* But wait.. there's more. Design your standards by
1300 * committee and you too can get a free iordy field to
1301 * process. However its the speeds not the modes that
1302 * are supported... Note drivers using the timing API
1303 * will get this right anyway
1304 */
1305 }
1306
1307 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1308
b352e57d
AC
1309 if (ata_id_is_cfa(id)) {
1310 /*
1311 * Process compact flash extended modes
1312 */
1313 int pio = id[163] & 0x7;
1314 int dma = (id[163] >> 3) & 7;
1315
1316 if (pio)
1317 pio_mask |= (1 << 5);
1318 if (pio > 1)
1319 pio_mask |= (1 << 6);
1320 if (dma)
1321 mwdma_mask |= (1 << 3);
1322 if (dma > 1)
1323 mwdma_mask |= (1 << 4);
1324 }
1325
fb21f0d0
TH
1326 udma_mask = 0;
1327 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1328 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1329
1330 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1331}
1332
86e45b6b
TH
1333/**
1334 * ata_port_queue_task - Queue port_task
1335 * @ap: The ata_port to queue port_task for
e2a7f77a 1336 * @fn: workqueue function to be scheduled
65f27f38 1337 * @data: data for @fn to use
e2a7f77a 1338 * @delay: delay time for workqueue function
86e45b6b
TH
1339 *
1340 * Schedule @fn(@data) for execution after @delay jiffies using
1341 * port_task. There is one port_task per port and it's the
1342 * user(low level driver)'s responsibility to make sure that only
1343 * one task is active at any given time.
1344 *
1345 * libata core layer takes care of synchronization between
1346 * port_task and EH. ata_port_queue_task() may be ignored for EH
1347 * synchronization.
1348 *
1349 * LOCKING:
1350 * Inherited from caller.
1351 */
65f27f38 1352void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1353 unsigned long delay)
1354{
65f27f38
DH
1355 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1356 ap->port_task_data = data;
86e45b6b 1357
45a66c1c
ON
1358 /* may fail if ata_port_flush_task() in progress */
1359 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1360}
1361
1362/**
1363 * ata_port_flush_task - Flush port_task
1364 * @ap: The ata_port to flush port_task for
1365 *
1366 * After this function completes, port_task is guranteed not to
1367 * be running or scheduled.
1368 *
1369 * LOCKING:
1370 * Kernel thread context (may sleep)
1371 */
1372void ata_port_flush_task(struct ata_port *ap)
1373{
86e45b6b
TH
1374 DPRINTK("ENTER\n");
1375
45a66c1c 1376 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1377
0dd4b21f
BP
1378 if (ata_msg_ctl(ap))
1379 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1380}
1381
7102d230 1382static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1383{
77853bf2 1384 struct completion *waiting = qc->private_data;
a2a7a662 1385
a2a7a662 1386 complete(waiting);
a2a7a662
TH
1387}
1388
1389/**
2432697b 1390 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1391 * @dev: Device to which the command is sent
1392 * @tf: Taskfile registers for the command and the result
d69cf37d 1393 * @cdb: CDB for packet command
a2a7a662 1394 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1395 * @sgl: sg list for the data buffer of the command
2432697b 1396 * @n_elem: Number of sg entries
2b789108 1397 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1398 *
1399 * Executes libata internal command with timeout. @tf contains
1400 * command on entry and result on return. Timeout and error
1401 * conditions are reported via return value. No recovery action
1402 * is taken after a command times out. It's caller's duty to
1403 * clean up after timeout.
1404 *
1405 * LOCKING:
1406 * None. Should be called with kernel context, might sleep.
551e8889
TH
1407 *
1408 * RETURNS:
1409 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1410 */
2432697b
TH
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the chosen tag must be free here - anything else is a bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear active-command bookkeeping so the internal
	 * command runs alone; restored after completion below
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of all sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* 0 means "use the module-wide default probe timeout" */
	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the bookkeeping saved before issuing */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1570
2432697b 1571/**
33480a0e 1572 * ata_exec_internal - execute libata internal command
2432697b
TH
1573 * @dev: Device to which the command is sent
1574 * @tf: Taskfile registers for the command and the result
1575 * @cdb: CDB for packet command
1576 * @dma_dir: Data tranfer direction of the command
1577 * @buf: Data buffer of the command
1578 * @buflen: Length of data buffer
2b789108 1579 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1580 *
1581 * Wrapper around ata_exec_internal_sg() which takes simple
1582 * buffer instead of sg list.
1583 *
1584 * LOCKING:
1585 * None. Should be called with kernel context, might sleep.
1586 *
1587 * RETURNS:
1588 * Zero on success, AC_ERR_* mask on failure
1589 */
1590unsigned ata_exec_internal(struct ata_device *dev,
1591 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1592 int dma_dir, void *buf, unsigned int buflen,
1593 unsigned long timeout)
2432697b 1594{
33480a0e
TH
1595 struct scatterlist *psg = NULL, sg;
1596 unsigned int n_elem = 0;
2432697b 1597
33480a0e
TH
1598 if (dma_dir != DMA_NONE) {
1599 WARN_ON(!buf);
1600 sg_init_one(&sg, buf, buflen);
1601 psg = &sg;
1602 n_elem++;
1603 }
2432697b 1604
2b789108
TH
1605 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1606 timeout);
2432697b
TH
1607}
1608
977e6b9f
TH
1609/**
1610 * ata_do_simple_cmd - execute simple internal command
1611 * @dev: Device to which the command is sent
1612 * @cmd: Opcode to execute
1613 *
1614 * Execute a 'simple' command, that only consists of the opcode
1615 * 'cmd' itself, without filling any other registers
1616 *
1617 * LOCKING:
1618 * Kernel thread context (may sleep).
1619 *
1620 * RETURNS:
1621 * Zero on success, AC_ERR_* mask on failure
e58eb583 1622 */
77b08fb5 1623unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1624{
1625 struct ata_taskfile tf;
e58eb583
TH
1626
1627 ata_tf_init(dev, &tf);
1628
1629 tf.command = cmd;
1630 tf.flags |= ATA_TFLAG_DEVICE;
1631 tf.protocol = ATA_PROT_NODATA;
1632
2b789108 1633 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1634}
1635
1bc4ccff
AC
1636/**
1637 * ata_pio_need_iordy - check if iordy needed
1638 * @adev: ATA device
1639 *
1640 * Check if the current speed of the device requires IORDY. Used
1641 * by various controllers for chip configuration.
1642 */
a617c09f 1643
1bc4ccff
AC
1644unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1645{
432729f0
AC
1646 /* Controller doesn't support IORDY. Probably a pointless check
1647 as the caller should know this */
9af5c9c9 1648 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1649 return 0;
432729f0
AC
1650 /* PIO3 and higher it is mandatory */
1651 if (adev->pio_mode > XFER_PIO_2)
1652 return 1;
1653 /* We turn it on when possible */
1654 if (ata_id_has_iordy(adev->id))
1bc4ccff 1655 return 1;
432729f0
AC
1656 return 0;
1657}
2e9edbf8 1658
432729f0
AC
1659/**
1660 * ata_pio_mask_no_iordy - Return the non IORDY mask
1661 * @adev: ATA device
1662 *
1663 * Compute the highest mode possible if we are not using iordy. Return
1664 * -1 if no iordy mode is available.
1665 */
a617c09f 1666
432729f0
AC
1667static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1668{
1bc4ccff 1669 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1670 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1671 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1672 /* Is the speed faster than the drive allows non IORDY ? */
1673 if (pio) {
1674 /* This is cycle times not frequency - watch the logic! */
1675 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1676 return 3 << ATA_SHIFT_PIO;
1677 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1678 }
1679 }
432729f0 1680 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1681}
1682
1da177e4 1683/**
49016aca 1684 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1685 * @dev: target device
1686 * @p_class: pointer to class of the target device (may be changed)
bff04647 1687 * @flags: ATA_READID_* flags
fe635c7e 1688 * @id: buffer to read IDENTIFY data into
1da177e4 1689 *
49016aca
TH
1690 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1691 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1692 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1693 * for pre-ATA4 drives.
1da177e4 1694 *
50a99018
AC
1695 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1696 * now we abort if we hit that case.
1697 *
1da177e4 1698 * LOCKING:
49016aca
TH
1699 * Kernel thread context (may sleep)
1700 *
1701 * RETURNS:
1702 * 0 on success, -errno otherwise.
1da177e4 1703 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	/* may_fallback: allow one ATA<->ATAPI class retry on abort;
	 * tried_spinup: issue the SET_FEATURES spin-up at most once
	 */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY opcode matching the (assumed) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: preset the error for the checks below */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* word 2 values 0x37c8/0x738c flag a drive that powered up in
	 * standby and needs an explicit spin-up before further commands
	 */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL,
					     DMA_NONE, NULL, 0, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * shoud never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1860
3373efd8 1861static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1862{
9af5c9c9
TH
1863 struct ata_port *ap = dev->link->ap;
1864 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1865}
1866
a6e6ce8e
TH
1867static void ata_dev_config_ncq(struct ata_device *dev,
1868 char *desc, size_t desc_sz)
1869{
9af5c9c9 1870 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1871 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1872
1873 if (!ata_id_has_ncq(dev->id)) {
1874 desc[0] = '\0';
1875 return;
1876 }
75683fe7 1877 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1878 snprintf(desc, desc_sz, "NCQ (not used)");
1879 return;
1880 }
a6e6ce8e 1881 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1882 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1883 dev->flags |= ATA_DFLAG_NCQ;
1884 }
1885
1886 if (hdepth >= ddepth)
1887 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1888 else
1889 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1890}
1891
49016aca 1892/**
ffeae418 1893 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1894 * @dev: Target device to configure
1895 *
1896 * Configure @dev according to @dev->id. Generic and low-level
1897 * driver specific fixups are also applied.
49016aca
TH
1898 *
1899 * LOCKING:
ffeae418
TH
1900 * Kernel thread context (may sleep)
1901 *
1902 * RETURNS:
1903 * 0 on success, -errno otherwise
49016aca 1904 */
efdaedc4 1905int ata_dev_configure(struct ata_device *dev)
49016aca 1906{
9af5c9c9
TH
1907 struct ata_port *ap = dev->link->ap;
1908 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 1909 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1910 const u16 *id = dev->id;
ff8854b2 1911 unsigned int xfer_mask;
b352e57d 1912 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1913 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1914 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1915 int rc;
49016aca 1916
0dd4b21f 1917 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1918 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1919 __FUNCTION__);
ffeae418 1920 return 0;
49016aca
TH
1921 }
1922
0dd4b21f 1923 if (ata_msg_probe(ap))
44877b4e 1924 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1925
75683fe7
TH
1926 /* set horkage */
1927 dev->horkage |= ata_dev_blacklisted(dev);
1928
6746544c
TH
1929 /* let ACPI work its magic */
1930 rc = ata_acpi_on_devcfg(dev);
1931 if (rc)
1932 return rc;
08573a86 1933
05027adc
TH
1934 /* massage HPA, do it early as it might change IDENTIFY data */
1935 rc = ata_hpa_resize(dev);
1936 if (rc)
1937 return rc;
1938
c39f5ebe 1939 /* print device capabilities */
0dd4b21f 1940 if (ata_msg_probe(ap))
88574551
TH
1941 ata_dev_printk(dev, KERN_DEBUG,
1942 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1943 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1944 __FUNCTION__,
f15a1daf
TH
1945 id[49], id[82], id[83], id[84],
1946 id[85], id[86], id[87], id[88]);
c39f5ebe 1947
208a9933 1948 /* initialize to-be-configured parameters */
ea1dd4e1 1949 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1950 dev->max_sectors = 0;
1951 dev->cdb_len = 0;
1952 dev->n_sectors = 0;
1953 dev->cylinders = 0;
1954 dev->heads = 0;
1955 dev->sectors = 0;
1956
1da177e4
LT
1957 /*
1958 * common ATA, ATAPI feature tests
1959 */
1960
ff8854b2 1961 /* find max transfer mode; for printk only */
1148c3a7 1962 xfer_mask = ata_id_xfermask(id);
1da177e4 1963
0dd4b21f
BP
1964 if (ata_msg_probe(ap))
1965 ata_dump_id(id);
1da177e4 1966
ef143d57
AL
1967 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1968 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1969 sizeof(fwrevbuf));
1970
1971 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1972 sizeof(modelbuf));
1973
1da177e4
LT
1974 /* ATA-specific feature tests */
1975 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1976 if (ata_id_is_cfa(id)) {
1977 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1978 ata_dev_printk(dev, KERN_WARNING,
1979 "supports DRM functions and may "
1980 "not be fully accessable.\n");
b352e57d
AC
1981 snprintf(revbuf, 7, "CFA");
1982 }
1983 else
1984 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1985
1148c3a7 1986 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1987
3f64f565
EM
1988 if (dev->id[59] & 0x100)
1989 dev->multi_count = dev->id[59] & 0xff;
1990
1148c3a7 1991 if (ata_id_has_lba(id)) {
4c2d721a 1992 const char *lba_desc;
a6e6ce8e 1993 char ncq_desc[20];
8bf62ece 1994
4c2d721a
TH
1995 lba_desc = "LBA";
1996 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1997 if (ata_id_has_lba48(id)) {
8bf62ece 1998 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1999 lba_desc = "LBA48";
6fc49adb
TH
2000
2001 if (dev->n_sectors >= (1UL << 28) &&
2002 ata_id_has_flush_ext(id))
2003 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2004 }
8bf62ece 2005
a6e6ce8e
TH
2006 /* config NCQ */
2007 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2008
8bf62ece 2009 /* print device info to dmesg */
3f64f565
EM
2010 if (ata_msg_drv(ap) && print_info) {
2011 ata_dev_printk(dev, KERN_INFO,
2012 "%s: %s, %s, max %s\n",
2013 revbuf, modelbuf, fwrevbuf,
2014 ata_mode_string(xfer_mask));
2015 ata_dev_printk(dev, KERN_INFO,
2016 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 2017 (unsigned long long)dev->n_sectors,
3f64f565
EM
2018 dev->multi_count, lba_desc, ncq_desc);
2019 }
ffeae418 2020 } else {
8bf62ece
AL
2021 /* CHS */
2022
2023 /* Default translation */
1148c3a7
TH
2024 dev->cylinders = id[1];
2025 dev->heads = id[3];
2026 dev->sectors = id[6];
8bf62ece 2027
1148c3a7 2028 if (ata_id_current_chs_valid(id)) {
8bf62ece 2029 /* Current CHS translation is valid. */
1148c3a7
TH
2030 dev->cylinders = id[54];
2031 dev->heads = id[55];
2032 dev->sectors = id[56];
8bf62ece
AL
2033 }
2034
2035 /* print device info to dmesg */
3f64f565 2036 if (ata_msg_drv(ap) && print_info) {
88574551 2037 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2038 "%s: %s, %s, max %s\n",
2039 revbuf, modelbuf, fwrevbuf,
2040 ata_mode_string(xfer_mask));
a84471fe 2041 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2042 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2043 (unsigned long long)dev->n_sectors,
2044 dev->multi_count, dev->cylinders,
2045 dev->heads, dev->sectors);
2046 }
07f6f7d0
AL
2047 }
2048
6e7846e9 2049 dev->cdb_len = 16;
1da177e4
LT
2050 }
2051
2052 /* ATAPI-specific feature tests */
2c13b7ce 2053 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2054 const char *cdb_intr_string = "";
2055 const char *atapi_an_string = "";
7d77b247 2056 u32 sntf;
08a556db 2057
1148c3a7 2058 rc = atapi_cdb_len(id);
1da177e4 2059 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2060 if (ata_msg_warn(ap))
88574551
TH
2061 ata_dev_printk(dev, KERN_WARNING,
2062 "unsupported CDB len\n");
ffeae418 2063 rc = -EINVAL;
1da177e4
LT
2064 goto err_out_nosup;
2065 }
6e7846e9 2066 dev->cdb_len = (unsigned int) rc;
1da177e4 2067
7d77b247
TH
2068 /* Enable ATAPI AN if both the host and device have
2069 * the support. If PMP is attached, SNTF is required
2070 * to enable ATAPI AN to discern between PHY status
2071 * changed notifications and ATAPI ANs.
9f45cbd3 2072 */
7d77b247
TH
2073 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2074 (!ap->nr_pmp_links ||
2075 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2076 unsigned int err_mask;
2077
9f45cbd3 2078 /* issue SET feature command to turn this on */
854c73a2
TH
2079 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2080 if (err_mask)
9f45cbd3 2081 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2082 "failed to enable ATAPI AN "
2083 "(err_mask=0x%x)\n", err_mask);
2084 else {
9f45cbd3 2085 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2086 atapi_an_string = ", ATAPI AN";
2087 }
9f45cbd3
KCA
2088 }
2089
08a556db 2090 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2091 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2092 cdb_intr_string = ", CDB intr";
2093 }
312f7da2 2094
1da177e4 2095 /* print device info to dmesg */
5afc8142 2096 if (ata_msg_drv(ap) && print_info)
ef143d57 2097 ata_dev_printk(dev, KERN_INFO,
854c73a2 2098 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2099 modelbuf, fwrevbuf,
12436c30 2100 ata_mode_string(xfer_mask),
854c73a2 2101 cdb_intr_string, atapi_an_string);
1da177e4
LT
2102 }
2103
914ed354
TH
2104 /* determine max_sectors */
2105 dev->max_sectors = ATA_MAX_SECTORS;
2106 if (dev->flags & ATA_DFLAG_LBA48)
2107 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2108
93590859
AC
2109 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2110 /* Let the user know. We don't want to disallow opens for
2111 rescue purposes, or in case the vendor is just a blithering
2112 idiot */
2113 if (print_info) {
2114 ata_dev_printk(dev, KERN_WARNING,
2115"Drive reports diagnostics failure. This may indicate a drive\n");
2116 ata_dev_printk(dev, KERN_WARNING,
2117"fault or invalid emulation. Contact drive vendor for information.\n");
2118 }
2119 }
2120
4b2f3ede 2121 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2122 if (ata_dev_knobble(dev)) {
5afc8142 2123 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2124 ata_dev_printk(dev, KERN_INFO,
2125 "applying bridge limits\n");
5a529139 2126 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2127 dev->max_sectors = ATA_MAX_SECTORS;
2128 }
2129
75683fe7 2130 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2131 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2132 dev->max_sectors);
18d6e9d5 2133
4b2f3ede 2134 if (ap->ops->dev_config)
cd0d3bbc 2135 ap->ops->dev_config(dev);
4b2f3ede 2136
0dd4b21f
BP
2137 if (ata_msg_probe(ap))
2138 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2139 __FUNCTION__, ata_chk_status(ap));
ffeae418 2140 return 0;
1da177e4
LT
2141
2142err_out_nosup:
0dd4b21f 2143 if (ata_msg_probe(ap))
88574551
TH
2144 ata_dev_printk(dev, KERN_DEBUG,
2145 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2146 return rc;
1da177e4
LT
2147}
2148
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40, unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2161
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80, unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2174
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK, unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2186
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA, unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2198
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets a fixed retry budget; decremented on failure
	 * in the 'fail' path below */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* snapshot classes sensed by the reset; phy_reset may have marked
	 * the port disabled, in which case everything reads as NONE */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* re-enable the port — phy_reset may have disabled it */
	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	ata_link_for_each_dev(dev, &ap->link)
		dev->pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		/* devices that still have retries left get their sensed
		 * class restored; exhausted ones stay UNKNOWN/disabled */
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		/* PRINTINFO makes ata_dev_configure() chatty for the
		 * initial probe only */
		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device survived the sequence */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* 'dev' points at the device that failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough - -ENODEV also gets the speed-down
		 * treatment below */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2334
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing the DISABLED flag is all it takes; per-device state
	 * is left untouched */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2350
3be680b7
TH
2351/**
2352 * sata_print_link_status - Print SATA link status
936fd732 2353 * @link: SATA link to printk link status about
3be680b7
TH
2354 *
2355 * This function prints link speed and status of a SATA link.
2356 *
2357 * LOCKING:
2358 * None.
2359 */
936fd732 2360void sata_print_link_status(struct ata_link *link)
3be680b7 2361{
6d5f9732 2362 u32 sstatus, scontrol, tmp;
3be680b7 2363
936fd732 2364 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2365 return;
936fd732 2366 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2367
936fd732 2368 if (ata_link_online(link)) {
3be680b7 2369 tmp = (sstatus >> 4) & 0xf;
936fd732 2370 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2371 "SATA link up %s (SStatus %X SControl %X)\n",
2372 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2373 } else {
936fd732 2374 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2375 "SATA link down (SStatus %X SControl %X)\n",
2376 sstatus, scontrol);
3be680b7
TH
2377 }
2378}
2379
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset (SControl 0x301: DET=1 requests
		 * interface re-initialization per the SATA spec) */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset (DET back to 0) */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; poll the SStatus
	 * DET field every 200ms for up to 5 seconds */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* link is up; now wait for the device itself to come out of BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2435
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	/* __sata_phy_reset() disables the port if the link never came up;
	 * only follow up with a bus reset when it is still enabled */
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
2454
ebdfca6e
AC
2455/**
2456 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2457 * @adev: device
2458 *
2459 * Obtain the other device on the same cable, or if none is
2460 * present NULL is returned
2461 */
2e9edbf8 2462
3373efd8 2463struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2464{
9af5c9c9
TH
2465 struct ata_link *link = adev->link;
2466 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2467 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2468 return NULL;
2469 return pair;
2470}
2471
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* mark both possible devices absent before flagging the port */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2491
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	/* sata_spd_limit is a bitmask of allowed speeds; <= 1 means
	 * we are already restricted to the lowest speed */
	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2550
936fd732 2551static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2552{
2553 u32 spd, limit;
2554
936fd732 2555 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2556 limit = 0;
2557 else
936fd732 2558 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2559
2560 spd = (*scontrol >> 4) & 0xf;
2561 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2562
2563 return spd != limit;
2564}
2565
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* if SControl can't be read, assume no reconfiguration is needed */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(link, &scontrol);
}
2590
2591/**
3c567b7d 2592 * sata_set_spd - set SATA spd according to spd limit
936fd732 2593 * @link: Link to set SATA spd for
1c3fae4d 2594 *
936fd732 2595 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2596 *
2597 * LOCKING:
2598 * Inherited from caller.
2599 *
2600 * RETURNS:
2601 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2602 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2603 */
936fd732 2604int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2605{
2606 u32 scontrol;
81952c54 2607 int rc;
1c3fae4d 2608
936fd732 2609 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2610 return rc;
1c3fae4d 2611
936fd732 2612 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2613 return 0;
2614
936fd732 2615 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2616 return rc;
2617
1c3fae4d
TH
2618 return 1;
2619}
2620
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

/* Columns appear to follow struct ata_timing field order:
 * mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma
 * -- TODO(review): confirm against the struct definition in libata.h.
 * The 0xFF mode entry terminates the table for ata_timing_find_mode(). */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */

	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },

	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },

	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */

	{ 0xFF }
};
2669
/* round v up to a whole number of 'unit's; EZ keeps 0 as 0 so that
 * "not applicable" timing fields stay zero after quantization */
#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)

/* Convert nanosecond timings in @t to bus clock counts in @q, using
 * cycle time T (PIO/MWDMA) and UT (UDMA), both in picoseconds --
 * hence the *1000 on the nanosecond source values. */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
2684
/* Merge timings @a and @b into @m: for each field selected by @what,
 * take the larger (slower) of the two values so the result satisfies
 * both sources. */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
}
2697
2698static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2699{
2700 const struct ata_timing *t;
2701
2702 for (t = ata_timing; t->mode != speed; t++)
91190758 2703 if (t->mode == 0xFF)
452503f9 2704 return NULL;
2e9edbf8 2705 return t;
452503f9
AC
2706}
2707
/* Compute quantized bus-clock timings for @adev at transfer mode @speed.
 * T and UT are the bus cycle times (PIO/MWDMA and UDMA respectively)
 * used by ata_timing_quantize().  Returns 0 on success, -EINVAL if
 * @speed has no entry in the timing table. */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2778
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* QUIET is or'd into the selector; peel it off first */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* knock off the fastest remaining DMA mode, preferring
		 * to shave UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* restrict UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fallthrough - FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to leave the device with no PIO mode, and report
	 * -ENOENT when nothing actually changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
2867
/* Issue SET FEATURES - XFER MODE for @dev's selected xfer_mode, then
 * revalidate the device.  Returns 0 on success, -EIO if the device
 * rejects the command, or the revalidation error. */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;
	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* POST_SETMODE tells revalidation this is a mode-change follow-up */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
2906
1da177e4 2907/**
04351821 2908 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2909 * @link: link on which timings will be programmed
e82cbdb9 2910 * @r_failed_dev: out paramter for failed device
1da177e4 2911 *
04351821
AC
2912 * Standard implementation of the function used to tune and set
2913 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2914 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2915 * returned in @r_failed_dev.
780a87f7 2916 *
1da177e4 2917 * LOCKING:
0cba632b 2918 * PCI/etc. bus probe sem.
e82cbdb9
TH
2919 *
2920 * RETURNS:
2921 * 0 on success, negative errno otherwise
1da177e4 2922 */
04351821 2923
0260731f 2924int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 2925{
0260731f 2926 struct ata_port *ap = link->ap;
e8e0619f 2927 struct ata_device *dev;
f58229f8 2928 int rc = 0, used_dma = 0, found = 0;
3adcebb2 2929
a6d5a51c 2930 /* step 1: calculate xfer_mask */
f58229f8 2931 ata_link_for_each_dev(dev, link) {
acf356b1 2932 unsigned int pio_mask, dma_mask;
b3a70601 2933 unsigned int mode_mask;
a6d5a51c 2934
e1211e3f 2935 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2936 continue;
2937
b3a70601
AC
2938 mode_mask = ATA_DMA_MASK_ATA;
2939 if (dev->class == ATA_DEV_ATAPI)
2940 mode_mask = ATA_DMA_MASK_ATAPI;
2941 else if (ata_id_is_cfa(dev->id))
2942 mode_mask = ATA_DMA_MASK_CFA;
2943
3373efd8 2944 ata_dev_xfermask(dev);
1da177e4 2945
acf356b1
TH
2946 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2947 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
2948
2949 if (libata_dma_mask & mode_mask)
2950 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2951 else
2952 dma_mask = 0;
2953
acf356b1
TH
2954 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2955 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2956
4f65977d 2957 found = 1;
5444a6f4
AC
2958 if (dev->dma_mode)
2959 used_dma = 1;
a6d5a51c 2960 }
4f65977d 2961 if (!found)
e82cbdb9 2962 goto out;
a6d5a51c
TH
2963
2964 /* step 2: always set host PIO timings */
f58229f8 2965 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2966 if (!ata_dev_enabled(dev))
2967 continue;
2968
2969 if (!dev->pio_mode) {
f15a1daf 2970 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2971 rc = -EINVAL;
e82cbdb9 2972 goto out;
e8e0619f
TH
2973 }
2974
2975 dev->xfer_mode = dev->pio_mode;
2976 dev->xfer_shift = ATA_SHIFT_PIO;
2977 if (ap->ops->set_piomode)
2978 ap->ops->set_piomode(ap, dev);
2979 }
1da177e4 2980
a6d5a51c 2981 /* step 3: set host DMA timings */
f58229f8 2982 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2983 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2984 continue;
2985
2986 dev->xfer_mode = dev->dma_mode;
2987 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2988 if (ap->ops->set_dmamode)
2989 ap->ops->set_dmamode(ap, dev);
2990 }
1da177e4
LT
2991
2992 /* step 4: update devices' xfer mode */
f58229f8 2993 ata_link_for_each_dev(dev, link) {
18d90deb 2994 /* don't update suspended devices' xfer mode */
9666f400 2995 if (!ata_dev_enabled(dev))
83206a29
TH
2996 continue;
2997
3373efd8 2998 rc = ata_dev_set_mode(dev);
5bbc53f4 2999 if (rc)
e82cbdb9 3000 goto out;
83206a29 3001 }
1da177e4 3002
e8e0619f
TH
3003 /* Record simplex status. If we selected DMA then the other
3004 * host channels are not permitted to do so.
5444a6f4 3005 */
cca3974e 3006 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3007 ap->host->simplex_claimed = ap;
5444a6f4 3008
e82cbdb9
TH
3009 out:
3010 if (rc)
3011 *r_failed_dev = dev;
3012 return rc;
1da177e4
LT
3013}
3014
04351821
AC
3015/**
3016 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3017 * @link: link on which timings will be programmed
04351821
AC
3018 * @r_failed_dev: out paramter for failed device
3019 *
3020 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3021 * ata_set_mode() fails, pointer to the failing device is
3022 * returned in @r_failed_dev.
3023 *
3024 * LOCKING:
3025 * PCI/etc. bus probe sem.
3026 *
3027 * RETURNS:
3028 * 0 on success, negative errno otherwise
3029 */
0260731f 3030int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 3031{
0260731f
TH
3032 struct ata_port *ap = link->ap;
3033
04351821
AC
3034 /* has private set_mode? */
3035 if (ap->ops->set_mode)
0260731f
TH
3036 return ap->ops->set_mode(link, r_failed_dev);
3037 return ata_do_set_mode(link, r_failed_dev);
04351821
AC
3038}
3039
1fdffbce
JG
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the taskfile registers first, then strobe the command
	 * register -- this ordering is the whole point of the helper
	 */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3059
1da177e4
LT
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies) before warning the user
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if the status reads 0xff (no device /
 *	floating bus), -EBUSY if BSY never cleared within @tmout.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* phase 1: poll every 50ms until BSY clears or the
	 * "impatience" timeout expires
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy: tell the user we haven't given up yet */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* phase 2: keep polling until the overall timeout */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	/* 0xff means nothing is driving the bus */
	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3114
d4b2bab4
TH
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -ENODEV if the link is offline and the status
 *	reads 0xff, -EBUSY if @deadline passes first.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* 0xff with the link down means there's nothing there */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once after 5s, but only if at least 3s of the
		 * deadline remain (otherwise we'll fail soon anyway)
		 */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3156
/* Post-reset follow-up for ata_bus_softreset(): wait for the devices
 * found earlier by ata_devchk() to come out of BSY, then re-run the
 * device selection dance.  A -ENODEV from ata_wait_ready() is recorded
 * in @ret but does not abort the sequence (the other device may still
 * be fine); any other error is returned immediately.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;	/* remember, but keep going */
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			/* reset signature: nsect == lbal == 1 */
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;	/* remember, but keep going */
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3216
d4b2bab4
TH
/* Issue an ATA software reset (SRST) on @ap's bus by pulsing the SRST
 * bit in the device control register, drop all devices back to PIO0
 * timings, wait the customary 150ms, and then hand off to
 * ata_bus_post_reset() to wait for !BSY.  Returns -ENODEV if the bus
 * reads 0xFF (nothing attached), otherwise the post-reset result.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	struct ata_device *dev;
	int i = 0;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* If we issued an SRST then an ATA drive (not ATAPI)
	 * may have changed configuration and be in PIO0 timing. If
	 * we did a hard reset (or are coming from power on) this is
	 * true for ATA or ATAPI. Until we've set a suitable controller
	 * mode we should not touch the bus as we may be talking too fast.
	 */

	ata_link_for_each_dev(dev, &ap->link)
		dev->pio_mode = XFER_PIO_0;

	/* If the controller has a pio mode setup function then use
	   it to set the chipset to rights. Don't touch the DMA setup
	   as that will be dealt with when revalidating */
	if (ap->ops->set_piomode) {
		ata_link_for_each_dev(dev, &ap->link)
			if (devmask & (1 << i++))
				ap->ops->set_piomode(ap, dev);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3273
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present; on SATA with
	 * ATA_FLAG_SATA_RESET, assume device 0 without probing
	 */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; -ENODEV (empty bus) is not fatal here */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3361
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the sooner of @deadline and
	 * now + params[2] (the debounce timeout)
	 */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* keep only the DET field */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 only counts as stable once the
			 * deadline has passed (stuck-at-1 workaround)
			 */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3430
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* keep the SPD field (bits 4-7), clear DET to 0 and set the
	 * IPM field to 3 -- NOTE(review): field meanings per the SATA
	 * SControl layout; confirm against the spec
	 */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3466
f5914a46
TH
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: some links need a hardreset to resume */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		/* not ready and not simply absent: escalate to hardreset */
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3529
c2bd5804
TH
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing to reset, report no device */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices;
	 * err == 0x81 means the master reported a slave failure, so
	 * skip classifying the slave
	 */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3589
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable the phy while we change SPD */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3649
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.  -EAGAIN requests a follow-up
 *	softreset when a port multiplier is attached.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3717
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear: write back what we read) */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3762
623a3128
TH
3763/**
3764 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3765 * @dev: device to compare against
3766 * @new_class: class of the new device
3767 * @new_id: IDENTIFY page of the new device
3768 *
3769 * Compare @new_class and @new_id against @dev and determine
3770 * whether @dev is the device indicated by @new_class and
3771 * @new_id.
3772 *
3773 * LOCKING:
3774 * None.
3775 *
3776 * RETURNS:
3777 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3778 */
3373efd8
TH
3779static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3780 const u16 *new_id)
623a3128
TH
3781{
3782 const u16 *old_id = dev->id;
a0cf733b
TH
3783 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3784 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3785
3786 if (dev->class != new_class) {
f15a1daf
TH
3787 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3788 dev->class, new_class);
623a3128
TH
3789 return 0;
3790 }
3791
a0cf733b
TH
3792 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3793 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3794 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3795 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3796
3797 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3798 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3799 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3800 return 0;
3801 }
3802
3803 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3804 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3805 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3806 return 0;
3807 }
3808
623a3128
TH
3809 return 1;
3810}
3811
3812/**
fe30911b 3813 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3814 * @dev: target ATA device
bff04647 3815 * @readid_flags: read ID flags
623a3128
TH
3816 *
3817 * Re-read IDENTIFY page and make sure @dev is still attached to
3818 * the port.
3819 *
3820 * LOCKING:
3821 * Kernel thread context (may sleep)
3822 *
3823 * RETURNS:
3824 * 0 on success, negative errno otherwise
3825 */
fe30911b 3826int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3827{
5eb45c02 3828 unsigned int class = dev->class;
9af5c9c9 3829 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3830 int rc;
3831
fe635c7e 3832 /* read ID data */
bff04647 3833 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3834 if (rc)
fe30911b 3835 return rc;
623a3128
TH
3836
3837 /* is the device still there? */
fe30911b
TH
3838 if (!ata_dev_same_device(dev, class, id))
3839 return -ENODEV;
623a3128 3840
fe635c7e 3841 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3842 return 0;
3843}
3844
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;	/* saved to detect capacity change */
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed -- a different capacity
	 * means this is effectively a different device
	 */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3909
6919a0a6
AC
/* One entry of the device blacklist table (ata_device_blacklist below):
 * devices are matched by model number and, optionally, firmware revision
 * from their IDENTIFY data, and the given ATA_HORKAGE_* flags are applied.
 */
struct ata_blacklist_entry {
	const char *model_num;	/* model number string; some entries end in
				 * '*' -- presumably a wildcard match, verify
				 * against the matching code */
	const char *model_rev;	/* firmware revision; NULL in most entries,
				 * apparently meaning "any revision" */
	unsigned long horkage;	/* ATA_HORKAGE_* flag mask to force on */
};
3915
3916static const struct ata_blacklist_entry ata_device_blacklist [] = {
3917 /* Devices with DMA related problems under Linux */
3918 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3919 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3920 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3921 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3922 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3923 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3924 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3925 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3926 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3927 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3928 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3929 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3930 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3931 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3932 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3933 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3934 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3935 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3936 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3937 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3938 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3939 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3940 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3941 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3942 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3943 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3944 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3945 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3946 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
39f19886 3947 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
5acd50f6 3948 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
39ce7128
TH
3949 { "IOMEGA ZIP 250 ATAPI Floppy",
3950 NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
3951 /* Odd clown on sil3726/4726 PMPs */
3952 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
3953 ATA_HORKAGE_SKIP_PM },
6919a0a6 3954
18d6e9d5 3955 /* Weird ATAPI devices */
40a1d531 3956 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 3957
6919a0a6
AC
3958 /* Devices we expect to fail diagnostics */
3959
3960 /* Devices where NCQ should be avoided */
3961 /* NCQ is slow */
3962 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3963 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3964 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3965 /* NCQ is broken */
539cc7c7 3966 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3967 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
0b0a43e0
DM
3968 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
3969 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
da6f0ec2 3970 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 3971
36e337d0
RH
3972 /* Blacklist entries taken from Silicon Image 3124/3132
3973 Windows driver .inf file - also several Linux problem reports */
3974 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3975 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3976 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
3977 /* Drives which do spurious command completion */
3978 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb 3979 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
70edb185 3980 { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, },
e14cbfa6 3981 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
2f8fcebb 3982 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
7f567620 3983 { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
a520f261 3984 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
7f567620 3985 { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3fb6589c 3986 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
954bb005 3987 { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ, },
13587960 3988 { "ST9160821AS", "3.CCD", ATA_HORKAGE_NONCQ, },
7f567620
TH
3989 { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
3990 { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
5d6aca8d 3991 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
6919a0a6 3992
16c55b03
TH
3993 /* devices which puke on READ_NATIVE_MAX */
3994 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3995 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3996 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3997 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 3998
93328e11
AC
3999 /* Devices which report 1 sector over size HPA */
4000 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4001 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4002
6919a0a6
AC
4003 /* End Marker */
4004 { }
1da177e4 4005};
2e9edbf8 4006
539cc7c7
JG
/* Compare @name against pattern @patt, where a trailing @wildchar in the
 * pattern matches any suffix.  Returns 0 on match, non-zero otherwise
 * (strncmp() semantics for the compared prefix).
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild = strchr(patt, wildchar);
	size_t len;

	if (wild && wild[1] == '\0') {
		/* trailing wildcard: compare only the part before it */
		len = wild - patt;
	} else {
		/* exact match over the full name; an empty name only
		 * matches an empty pattern */
		len = strlen(name);
		if (len == 0)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, len);
}
4029
75683fe7 4030static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4031{
8bfa79fc
TH
4032 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4033 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4034 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4035
8bfa79fc
TH
4036 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4037 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4038
6919a0a6 4039 while (ad->model_num) {
539cc7c7 4040 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4041 if (ad->model_rev == NULL)
4042 return ad->horkage;
539cc7c7 4043 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4044 return ad->horkage;
f4b15fef 4045 }
6919a0a6 4046 ad++;
f4b15fef 4047 }
1da177e4
LT
4048 return 0;
4049}
4050
6919a0a6
AC
4051static int ata_dma_blacklisted(const struct ata_device *dev)
4052{
4053 /* We don't support polling DMA.
4054 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4055 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4056 */
9af5c9c9 4057 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4058 (dev->flags & ATA_DFLAG_CDB_INTR))
4059 return 1;
75683fe7 4060 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4061}
4062
a6d5a51c
TH
4063/**
4064 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4065 * @dev: Device to compute xfermask for
4066 *
acf356b1
TH
4067 * Compute supported xfermask of @dev and store it in
4068 * dev->*_mask. This function is responsible for applying all
4069 * known limits including host controller limits, device
4070 * blacklist, etc...
a6d5a51c
TH
4071 *
4072 * LOCKING:
4073 * None.
a6d5a51c 4074 */
3373efd8 4075static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4076{
9af5c9c9
TH
4077 struct ata_link *link = dev->link;
4078 struct ata_port *ap = link->ap;
cca3974e 4079 struct ata_host *host = ap->host;
a6d5a51c 4080 unsigned long xfer_mask;
1da177e4 4081
37deecb5 4082 /* controller modes available */
565083e1
TH
4083 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4084 ap->mwdma_mask, ap->udma_mask);
4085
8343f889 4086 /* drive modes available */
37deecb5
TH
4087 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4088 dev->mwdma_mask, dev->udma_mask);
4089 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4090
b352e57d
AC
4091 /*
4092 * CFA Advanced TrueIDE timings are not allowed on a shared
4093 * cable
4094 */
4095 if (ata_dev_pair(dev)) {
4096 /* No PIO5 or PIO6 */
4097 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4098 /* No MWDMA3 or MWDMA 4 */
4099 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4100 }
4101
37deecb5
TH
4102 if (ata_dma_blacklisted(dev)) {
4103 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4104 ata_dev_printk(dev, KERN_WARNING,
4105 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4106 }
a6d5a51c 4107
14d66ab7
PV
4108 if ((host->flags & ATA_HOST_SIMPLEX) &&
4109 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4110 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4111 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4112 "other device, disabling DMA\n");
5444a6f4 4113 }
565083e1 4114
e424675f
JG
4115 if (ap->flags & ATA_FLAG_NO_IORDY)
4116 xfer_mask &= ata_pio_mask_no_iordy(dev);
4117
5444a6f4 4118 if (ap->ops->mode_filter)
a76b62ca 4119 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4120
8343f889
RH
4121 /* Apply cable rule here. Don't apply it early because when
4122 * we handle hot plug the cable type can itself change.
4123 * Check this last so that we know if the transfer rate was
4124 * solely limited by the cable.
4125 * Unknown or 80 wire cables reported host side are checked
4126 * drive side as well. Cases where we know a 40wire cable
4127 * is used safely for 80 are not checked here.
4128 */
4129 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4130 /* UDMA/44 or higher would be available */
4131 if((ap->cbl == ATA_CBL_PATA40) ||
4132 (ata_drive_40wire(dev->id) &&
4133 (ap->cbl == ATA_CBL_PATA_UNK ||
4134 ap->cbl == ATA_CBL_PATA80))) {
4135 ata_dev_printk(dev, KERN_WARNING,
4136 "limited to UDMA/33 due to 40-wire cable\n");
4137 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4138 }
4139
565083e1
TH
4140 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4141 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4142}
4143
1da177e4
LT
4144/**
4145 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4146 * @dev: Device to which command will be sent
4147 *
780a87f7
JG
4148 * Issue SET FEATURES - XFER MODE command to device @dev
4149 * on port @ap.
4150 *
1da177e4 4151 * LOCKING:
0cba632b 4152 * PCI/etc. bus probe sem.
83206a29
TH
4153 *
4154 * RETURNS:
4155 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4156 */
4157
3373efd8 4158static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4159{
a0123703 4160 struct ata_taskfile tf;
83206a29 4161 unsigned int err_mask;
1da177e4
LT
4162
4163 /* set up set-features taskfile */
4164 DPRINTK("set features - xfer mode\n");
4165
464cf177
TH
4166 /* Some controllers and ATAPI devices show flaky interrupt
4167 * behavior after setting xfer mode. Use polling instead.
4168 */
3373efd8 4169 ata_tf_init(dev, &tf);
a0123703
TH
4170 tf.command = ATA_CMD_SET_FEATURES;
4171 tf.feature = SETFEATURES_XFER;
464cf177 4172 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4173 tf.protocol = ATA_PROT_NODATA;
4174 tf.nsect = dev->xfer_mode;
1da177e4 4175
2b789108 4176 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4177
4178 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4179 return err_mask;
4180}
4181
4182/**
4183 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4184 * @dev: Device to which command will be sent
4185 * @enable: Whether to enable or disable the feature
4186 *
4187 * Issue SET FEATURES - SATA FEATURES command to device @dev
4188 * on port @ap with sector count set to indicate Asynchronous
4189 * Notification feature
4190 *
4191 * LOCKING:
4192 * PCI/etc. bus probe sem.
4193 *
4194 * RETURNS:
4195 * 0 on success, AC_ERR_* mask otherwise.
4196 */
4197static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4198{
4199 struct ata_taskfile tf;
4200 unsigned int err_mask;
4201
4202 /* set up set-features taskfile */
4203 DPRINTK("set features - SATA features\n");
4204
4205 ata_tf_init(dev, &tf);
4206 tf.command = ATA_CMD_SET_FEATURES;
4207 tf.feature = enable;
4208 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4209 tf.protocol = ATA_PROT_NODATA;
4210 tf.nsect = SATA_AN;
4211
2b789108 4212 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4213
83206a29
TH
4214 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4215 return err_mask;
1da177e4
LT
4216}
4217
8bf62ece
AL
4218/**
4219 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4220 * @dev: Device to which command will be sent
e2a7f77a
RD
4221 * @heads: Number of heads (taskfile parameter)
4222 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4223 *
4224 * LOCKING:
6aff8f1f
TH
4225 * Kernel thread context (may sleep)
4226 *
4227 * RETURNS:
4228 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4229 */
3373efd8
TH
4230static unsigned int ata_dev_init_params(struct ata_device *dev,
4231 u16 heads, u16 sectors)
8bf62ece 4232{
a0123703 4233 struct ata_taskfile tf;
6aff8f1f 4234 unsigned int err_mask;
8bf62ece
AL
4235
4236 /* Number of sectors per track 1-255. Number of heads 1-16 */
4237 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4238 return AC_ERR_INVALID;
8bf62ece
AL
4239
4240 /* set up init dev params taskfile */
4241 DPRINTK("init dev params \n");
4242
3373efd8 4243 ata_tf_init(dev, &tf);
a0123703
TH
4244 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4245 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4246 tf.protocol = ATA_PROT_NODATA;
4247 tf.nsect = sectors;
4248 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4249
2b789108 4250 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4251 /* A clean abort indicates an original or just out of spec drive
4252 and we should continue as we issue the setup based on the
4253 drive reported working geometry */
4254 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4255 err_mask = 0;
8bf62ece 4256
6aff8f1f
TH
4257 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4258 return err_mask;
8bf62ece
AL
4259}
4260
1da177e4 4261/**
0cba632b
JG
4262 * ata_sg_clean - Unmap DMA memory associated with command
4263 * @qc: Command containing DMA memory to be released
4264 *
4265 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4266 *
4267 * LOCKING:
cca3974e 4268 * spin_lock_irqsave(host lock)
1da177e4 4269 */
70e6ad0c 4270void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4271{
4272 struct ata_port *ap = qc->ap;
cedc9a47 4273 struct scatterlist *sg = qc->__sg;
1da177e4 4274 int dir = qc->dma_dir;
cedc9a47 4275 void *pad_buf = NULL;
1da177e4 4276
a4631474
TH
4277 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4278 WARN_ON(sg == NULL);
1da177e4
LT
4279
4280 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4281 WARN_ON(qc->n_elem > 1);
1da177e4 4282
2c13b7ce 4283 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4284
cedc9a47
JG
4285 /* if we padded the buffer out to 32-bit bound, and data
4286 * xfer direction is from-device, we must copy from the
4287 * pad buffer back into the supplied buffer
4288 */
4289 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4290 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4291
4292 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4293 if (qc->n_elem)
2f1f610b 4294 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47 4295 /* restore last sg */
87260216 4296 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
cedc9a47
JG
4297 if (pad_buf) {
4298 struct scatterlist *psg = &qc->pad_sgent;
45711f1a 4299 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
cedc9a47 4300 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4301 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4302 }
4303 } else {
2e242fa9 4304 if (qc->n_elem)
2f1f610b 4305 dma_unmap_single(ap->dev,
e1410f2d
JG
4306 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4307 dir);
cedc9a47
JG
4308 /* restore sg */
4309 sg->length += qc->pad_len;
4310 if (pad_buf)
4311 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4312 pad_buf, qc->pad_len);
4313 }
1da177e4
LT
4314
4315 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4316 qc->__sg = NULL;
1da177e4
LT
4317}
4318
4319/**
4320 * ata_fill_sg - Fill PCI IDE PRD table
4321 * @qc: Metadata associated with taskfile to be transferred
4322 *
780a87f7
JG
4323 * Fill PCI IDE PRD (scatter-gather) table with segments
4324 * associated with the current disk command.
4325 *
1da177e4 4326 * LOCKING:
cca3974e 4327 * spin_lock_irqsave(host lock)
1da177e4
LT
4328 *
4329 */
4330static void ata_fill_sg(struct ata_queued_cmd *qc)
4331{
1da177e4 4332 struct ata_port *ap = qc->ap;
cedc9a47
JG
4333 struct scatterlist *sg;
4334 unsigned int idx;
1da177e4 4335
a4631474 4336 WARN_ON(qc->__sg == NULL);
f131883e 4337 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4338
4339 idx = 0;
cedc9a47 4340 ata_for_each_sg(sg, qc) {
1da177e4
LT
4341 u32 addr, offset;
4342 u32 sg_len, len;
4343
4344 /* determine if physical DMA addr spans 64K boundary.
4345 * Note h/w doesn't support 64-bit, so we unconditionally
4346 * truncate dma_addr_t to u32.
4347 */
4348 addr = (u32) sg_dma_address(sg);
4349 sg_len = sg_dma_len(sg);
4350
4351 while (sg_len) {
4352 offset = addr & 0xffff;
4353 len = sg_len;
4354 if ((offset + sg_len) > 0x10000)
4355 len = 0x10000 - offset;
4356
4357 ap->prd[idx].addr = cpu_to_le32(addr);
4358 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4359 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4360
4361 idx++;
4362 sg_len -= len;
4363 addr += len;
4364 }
4365 }
4366
4367 if (idx)
4368 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4369}
b9a4197e 4370
d26fc955
AC
4371/**
4372 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4373 * @qc: Metadata associated with taskfile to be transferred
4374 *
4375 * Fill PCI IDE PRD (scatter-gather) table with segments
4376 * associated with the current disk command. Perform the fill
4377 * so that we avoid writing any length 64K records for
4378 * controllers that don't follow the spec.
4379 *
4380 * LOCKING:
4381 * spin_lock_irqsave(host lock)
4382 *
4383 */
4384static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4385{
4386 struct ata_port *ap = qc->ap;
4387 struct scatterlist *sg;
4388 unsigned int idx;
4389
4390 WARN_ON(qc->__sg == NULL);
4391 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4392
4393 idx = 0;
4394 ata_for_each_sg(sg, qc) {
4395 u32 addr, offset;
4396 u32 sg_len, len, blen;
4397
4398 /* determine if physical DMA addr spans 64K boundary.
4399 * Note h/w doesn't support 64-bit, so we unconditionally
4400 * truncate dma_addr_t to u32.
4401 */
4402 addr = (u32) sg_dma_address(sg);
4403 sg_len = sg_dma_len(sg);
4404
4405 while (sg_len) {
4406 offset = addr & 0xffff;
4407 len = sg_len;
4408 if ((offset + sg_len) > 0x10000)
4409 len = 0x10000 - offset;
4410
4411 blen = len & 0xffff;
4412 ap->prd[idx].addr = cpu_to_le32(addr);
4413 if (blen == 0) {
4414 /* Some PATA chipsets like the CS5530 can't
4415 cope with 0x0000 meaning 64K as the spec says */
4416 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4417 blen = 0x8000;
4418 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4419 }
4420 ap->prd[idx].flags_len = cpu_to_le32(blen);
4421 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4422
4423 idx++;
4424 sg_len -= len;
4425 addr += len;
4426 }
4427 }
4428
4429 if (idx)
4430 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4431}
4432
1da177e4
LT
4433/**
4434 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4435 * @qc: Metadata associated with taskfile to check
4436 *
780a87f7
JG
4437 * Allow low-level driver to filter ATA PACKET commands, returning
4438 * a status indicating whether or not it is OK to use DMA for the
4439 * supplied PACKET command.
4440 *
1da177e4 4441 * LOCKING:
cca3974e 4442 * spin_lock_irqsave(host lock)
0cba632b 4443 *
1da177e4
LT
4444 * RETURNS: 0 when ATAPI DMA can be used
4445 * nonzero otherwise
4446 */
4447int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4448{
4449 struct ata_port *ap = qc->ap;
b9a4197e
TH
4450
4451 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4452 * few ATAPI devices choke on such DMA requests.
4453 */
4454 if (unlikely(qc->nbytes & 15))
4455 return 1;
6f23a31d 4456
1da177e4 4457 if (ap->ops->check_atapi_dma)
b9a4197e 4458 return ap->ops->check_atapi_dma(qc);
1da177e4 4459
b9a4197e 4460 return 0;
1da177e4 4461}
b9a4197e 4462
31cc23b3
TH
4463/**
4464 * ata_std_qc_defer - Check whether a qc needs to be deferred
4465 * @qc: ATA command in question
4466 *
4467 * Non-NCQ commands cannot run with any other command, NCQ or
4468 * not. As upper layer only knows the queue depth, we are
4469 * responsible for maintaining exclusion. This function checks
4470 * whether a new command @qc can be issued.
4471 *
4472 * LOCKING:
4473 * spin_lock_irqsave(host lock)
4474 *
4475 * RETURNS:
4476 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4477 */
4478int ata_std_qc_defer(struct ata_queued_cmd *qc)
4479{
4480 struct ata_link *link = qc->dev->link;
4481
4482 if (qc->tf.protocol == ATA_PROT_NCQ) {
4483 if (!ata_tag_valid(link->active_tag))
4484 return 0;
4485 } else {
4486 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4487 return 0;
4488 }
4489
4490 return ATA_DEFER_LINK;
4491}
4492
1da177e4
LT
4493/**
4494 * ata_qc_prep - Prepare taskfile for submission
4495 * @qc: Metadata associated with taskfile to be prepared
4496 *
780a87f7
JG
4497 * Prepare ATA taskfile for submission.
4498 *
1da177e4 4499 * LOCKING:
cca3974e 4500 * spin_lock_irqsave(host lock)
1da177e4
LT
4501 */
4502void ata_qc_prep(struct ata_queued_cmd *qc)
4503{
4504 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4505 return;
4506
4507 ata_fill_sg(qc);
4508}
4509
d26fc955
AC
4510/**
4511 * ata_dumb_qc_prep - Prepare taskfile for submission
4512 * @qc: Metadata associated with taskfile to be prepared
4513 *
4514 * Prepare ATA taskfile for submission.
4515 *
4516 * LOCKING:
4517 * spin_lock_irqsave(host lock)
4518 */
4519void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4520{
4521 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4522 return;
4523
4524 ata_fill_sg_dumb(qc);
4525}
4526
e46834cd
BK
/* No-op ->qc_prep for controllers that need no PRD setup. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4528
0cba632b
JG
4529/**
4530 * ata_sg_init_one - Associate command with memory buffer
4531 * @qc: Command to be associated
4532 * @buf: Memory buffer
4533 * @buflen: Length of memory buffer, in bytes.
4534 *
4535 * Initialize the data-related elements of queued_cmd @qc
4536 * to point to a single memory buffer, @buf of byte length @buflen.
4537 *
4538 * LOCKING:
cca3974e 4539 * spin_lock_irqsave(host lock)
0cba632b
JG
4540 */
4541
1da177e4
LT
4542void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4543{
1da177e4
LT
4544 qc->flags |= ATA_QCFLAG_SINGLE;
4545
cedc9a47 4546 qc->__sg = &qc->sgent;
1da177e4 4547 qc->n_elem = 1;
cedc9a47 4548 qc->orig_n_elem = 1;
1da177e4 4549 qc->buf_virt = buf;
233277ca 4550 qc->nbytes = buflen;
87260216 4551 qc->cursg = qc->__sg;
1da177e4 4552
61c0596c 4553 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4554}
4555
0cba632b
JG
4556/**
4557 * ata_sg_init - Associate command with scatter-gather table.
4558 * @qc: Command to be associated
4559 * @sg: Scatter-gather table.
4560 * @n_elem: Number of elements in s/g table.
4561 *
4562 * Initialize the data-related elements of queued_cmd @qc
4563 * to point to a scatter-gather table @sg, containing @n_elem
4564 * elements.
4565 *
4566 * LOCKING:
cca3974e 4567 * spin_lock_irqsave(host lock)
0cba632b
JG
4568 */
4569
1da177e4
LT
4570void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4571 unsigned int n_elem)
4572{
4573 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4574 qc->__sg = sg;
1da177e4 4575 qc->n_elem = n_elem;
cedc9a47 4576 qc->orig_n_elem = n_elem;
87260216 4577 qc->cursg = qc->__sg;
1da177e4
LT
4578}
4579
4580/**
0cba632b
JG
4581 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4582 * @qc: Command with memory buffer to be mapped.
4583 *
4584 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4585 *
4586 * LOCKING:
cca3974e 4587 * spin_lock_irqsave(host lock)
1da177e4
LT
4588 *
4589 * RETURNS:
0cba632b 4590 * Zero on success, negative on error.
1da177e4
LT
4591 */
4592
4593static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4594{
4595 struct ata_port *ap = qc->ap;
4596 int dir = qc->dma_dir;
cedc9a47 4597 struct scatterlist *sg = qc->__sg;
1da177e4 4598 dma_addr_t dma_address;
2e242fa9 4599 int trim_sg = 0;
1da177e4 4600
cedc9a47
JG
4601 /* we must lengthen transfers to end on a 32-bit boundary */
4602 qc->pad_len = sg->length & 3;
4603 if (qc->pad_len) {
4604 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4605 struct scatterlist *psg = &qc->pad_sgent;
4606
a4631474 4607 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4608
4609 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4610
4611 if (qc->tf.flags & ATA_TFLAG_WRITE)
4612 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4613 qc->pad_len);
4614
4615 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4616 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4617 /* trim sg */
4618 sg->length -= qc->pad_len;
2e242fa9
TH
4619 if (sg->length == 0)
4620 trim_sg = 1;
cedc9a47
JG
4621
4622 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4623 sg->length, qc->pad_len);
4624 }
4625
2e242fa9
TH
4626 if (trim_sg) {
4627 qc->n_elem--;
e1410f2d
JG
4628 goto skip_map;
4629 }
4630
2f1f610b 4631 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4632 sg->length, dir);
537a95d9
TH
4633 if (dma_mapping_error(dma_address)) {
4634 /* restore sg */
4635 sg->length += qc->pad_len;
1da177e4 4636 return -1;
537a95d9 4637 }
1da177e4
LT
4638
4639 sg_dma_address(sg) = dma_address;
32529e01 4640 sg_dma_len(sg) = sg->length;
1da177e4 4641
2e242fa9 4642skip_map:
1da177e4
LT
4643 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4644 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4645
4646 return 0;
4647}
4648
4649/**
0cba632b
JG
4650 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4651 * @qc: Command with scatter-gather table to be mapped.
4652 *
4653 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4654 *
4655 * LOCKING:
cca3974e 4656 * spin_lock_irqsave(host lock)
1da177e4
LT
4657 *
4658 * RETURNS:
0cba632b 4659 * Zero on success, negative on error.
1da177e4
LT
4660 *
4661 */
4662
4663static int ata_sg_setup(struct ata_queued_cmd *qc)
4664{
4665 struct ata_port *ap = qc->ap;
cedc9a47 4666 struct scatterlist *sg = qc->__sg;
87260216 4667 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
e1410f2d 4668 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4669
44877b4e 4670 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4671 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4672
cedc9a47
JG
4673 /* we must lengthen transfers to end on a 32-bit boundary */
4674 qc->pad_len = lsg->length & 3;
4675 if (qc->pad_len) {
4676 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4677 struct scatterlist *psg = &qc->pad_sgent;
4678 unsigned int offset;
4679
a4631474 4680 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4681
4682 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4683
4684 /*
4685 * psg->page/offset are used to copy to-be-written
4686 * data in this function or read data in ata_sg_clean.
4687 */
4688 offset = lsg->offset + lsg->length - qc->pad_len;
45711f1a 4689 sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT));
cedc9a47
JG
4690 psg->offset = offset_in_page(offset);
4691
4692 if (qc->tf.flags & ATA_TFLAG_WRITE) {
45711f1a 4693 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
cedc9a47 4694 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4695 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4696 }
4697
4698 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4699 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4700 /* trim last sg */
4701 lsg->length -= qc->pad_len;
e1410f2d
JG
4702 if (lsg->length == 0)
4703 trim_sg = 1;
cedc9a47
JG
4704
4705 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4706 qc->n_elem - 1, lsg->length, qc->pad_len);
4707 }
4708
e1410f2d
JG
4709 pre_n_elem = qc->n_elem;
4710 if (trim_sg && pre_n_elem)
4711 pre_n_elem--;
4712
4713 if (!pre_n_elem) {
4714 n_elem = 0;
4715 goto skip_map;
4716 }
4717
1da177e4 4718 dir = qc->dma_dir;
2f1f610b 4719 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4720 if (n_elem < 1) {
4721 /* restore last sg */
4722 lsg->length += qc->pad_len;
1da177e4 4723 return -1;
537a95d9 4724 }
1da177e4
LT
4725
4726 DPRINTK("%d sg elements mapped\n", n_elem);
4727
e1410f2d 4728skip_map:
1da177e4
LT
4729 qc->n_elem = n_elem;
4730
4731 return 0;
4732}
4733
0baab86b 4734/**
c893a3ae 4735 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4736 * @buf: Buffer to swap
4737 * @buf_words: Number of 16-bit words in buffer.
4738 *
4739 * Swap halves of 16-bit words if needed to convert from
4740 * little-endian byte order to native cpu byte order, or
4741 * vice-versa.
4742 *
4743 * LOCKING:
6f0ef4fa 4744 * Inherited from caller.
0baab86b 4745 */
1da177e4
LT
4746void swap_buf_le16(u16 *buf, unsigned int buf_words)
4747{
4748#ifdef __BIG_ENDIAN
4749 unsigned int i;
4750
4751 for (i = 0; i < buf_words; i++)
4752 buf[i] = le16_to_cpu(buf[i]);
4753#endif /* __BIG_ENDIAN */
4754}
4755
6ae4cfb5 4756/**
0d5ff566 4757 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4758 * @adev: device to target
6ae4cfb5
AL
4759 * @buf: data buffer
4760 * @buflen: buffer length
344babaa 4761 * @write_data: read/write
6ae4cfb5
AL
4762 *
4763 * Transfer data from/to the device data register by PIO.
4764 *
4765 * LOCKING:
4766 * Inherited from caller.
6ae4cfb5 4767 */
0d5ff566
TH
4768void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4769 unsigned int buflen, int write_data)
1da177e4 4770{
9af5c9c9 4771 struct ata_port *ap = adev->link->ap;
6ae4cfb5 4772 unsigned int words = buflen >> 1;
1da177e4 4773
6ae4cfb5 4774 /* Transfer multiple of 2 bytes */
1da177e4 4775 if (write_data)
0d5ff566 4776 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4777 else
0d5ff566 4778 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4779
4780 /* Transfer trailing 1 byte, if any. */
4781 if (unlikely(buflen & 0x01)) {
4782 u16 align_buf[1] = { 0 };
4783 unsigned char *trailing_buf = buf + buflen - 1;
4784
4785 if (write_data) {
4786 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4787 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4788 } else {
0d5ff566 4789 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4790 memcpy(trailing_buf, align_buf, 1);
4791 }
4792 }
1da177e4
LT
4793}
4794
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;

	/* same transfer as ata_data_xfer(), bracketed by irq save/restore */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
4816
4817
6ae4cfb5 4818/**
5a5dbd18 4819 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4820 * @qc: Command on going
4821 *
5a5dbd18 4822 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4823 *
4824 * LOCKING:
4825 * Inherited from caller.
4826 */
4827
1da177e4
LT
4828static void ata_pio_sector(struct ata_queued_cmd *qc)
4829{
4830 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
1da177e4
LT
4831 struct ata_port *ap = qc->ap;
4832 struct page *page;
4833 unsigned int offset;
4834 unsigned char *buf;
4835
5a5dbd18 4836 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4837 ap->hsm_task_state = HSM_ST_LAST;
1da177e4 4838
45711f1a 4839 page = sg_page(qc->cursg);
87260216 4840 offset = qc->cursg->offset + qc->cursg_ofs;
1da177e4
LT
4841
4842 /* get the current page and offset */
4843 page = nth_page(page, (offset >> PAGE_SHIFT));
4844 offset %= PAGE_SIZE;
4845
1da177e4
LT
4846 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4847
91b8b313
AL
4848 if (PageHighMem(page)) {
4849 unsigned long flags;
4850
a6b2c5d4 4851 /* FIXME: use a bounce buffer */
91b8b313
AL
4852 local_irq_save(flags);
4853 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4854
91b8b313 4855 /* do the actual data transfer */
5a5dbd18 4856 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4857
91b8b313
AL
4858 kunmap_atomic(buf, KM_IRQ0);
4859 local_irq_restore(flags);
4860 } else {
4861 buf = page_address(page);
5a5dbd18 4862 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4863 }
1da177e4 4864
5a5dbd18
ML
4865 qc->curbytes += qc->sect_size;
4866 qc->cursg_ofs += qc->sect_size;
1da177e4 4867
87260216
JA
4868 if (qc->cursg_ofs == qc->cursg->length) {
4869 qc->cursg = sg_next(qc->cursg);
1da177e4
LT
4870 qc->cursg_ofs = 0;
4871 }
1da177e4 4872}
1da177e4 4873
07f6f7d0 4874/**
5a5dbd18 4875 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4876 * @qc: Command on going
4877 *
5a5dbd18 4878 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4879 * ATA device for the DRQ request.
4880 *
4881 * LOCKING:
4882 * Inherited from caller.
4883 */
1da177e4 4884
07f6f7d0
AL
4885static void ata_pio_sectors(struct ata_queued_cmd *qc)
4886{
4887 if (is_multi_taskfile(&qc->tf)) {
4888 /* READ/WRITE MULTIPLE */
4889 unsigned int nsect;
4890
587005de 4891 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4892
5a5dbd18 4893 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4894 qc->dev->multi_count);
07f6f7d0
AL
4895 while (nsect--)
4896 ata_pio_sector(qc);
4897 } else
4898 ata_pio_sector(qc);
4cc980b3
AL
4899
4900 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4901}
4902
c71c1857
AL
4903/**
4904 * atapi_send_cdb - Write CDB bytes to hardware
4905 * @ap: Port to which ATAPI device is attached.
4906 * @qc: Taskfile currently active
4907 *
4908 * When device has indicated its readiness to accept
4909 * a CDB, this function is called. Send the CDB.
4910 *
4911 * LOCKING:
4912 * caller.
4913 */
4914
4915static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4916{
4917 /* send SCSI cdb */
4918 DPRINTK("send cdb\n");
db024d53 4919 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4920
a6b2c5d4 4921 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4922 ata_altstatus(ap); /* flush */
4923
4924 switch (qc->tf.protocol) {
4925 case ATA_PROT_ATAPI:
4926 ap->hsm_task_state = HSM_ST;
4927 break;
4928 case ATA_PROT_ATAPI_NODATA:
4929 ap->hsm_task_state = HSM_ST_LAST;
4930 break;
4931 case ATA_PROT_ATAPI_DMA:
4932 ap->hsm_task_state = HSM_ST_LAST;
4933 /* initiate bmdma */
4934 ap->ops->bmdma_start(qc);
4935 break;
4936 }
1da177e4
LT
4937}
4938
6ae4cfb5
AL
4939/**
4940 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4941 * @qc: Command on going
4942 * @bytes: number of bytes
4943 *
4944 * Transfer Transfer data from/to the ATAPI device.
4945 *
4946 * LOCKING:
4947 * Inherited from caller.
4948 *
4949 */
4950
1da177e4
LT
4951static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4952{
4953 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4954 struct scatterlist *sg = qc->__sg;
0874ee76 4955 struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
1da177e4
LT
4956 struct ata_port *ap = qc->ap;
4957 struct page *page;
4958 unsigned char *buf;
4959 unsigned int offset, count;
0874ee76 4960 int no_more_sg = 0;
1da177e4 4961
563a6e1f 4962 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4963 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4964
4965next_sg:
0874ee76 4966 if (unlikely(no_more_sg)) {
7fb6ec28 4967 /*
563a6e1f
AL
4968 * The end of qc->sg is reached and the device expects
4969 * more data to transfer. In order not to overrun qc->sg
4970 * and fulfill length specified in the byte count register,
4971 * - for read case, discard trailing data from the device
4972 * - for write case, padding zero data to the device
4973 */
4974 u16 pad_buf[1] = { 0 };
4975 unsigned int words = bytes >> 1;
4976 unsigned int i;
4977
4978 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4979 ata_dev_printk(qc->dev, KERN_WARNING,
4980 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4981
4982 for (i = 0; i < words; i++)
a6b2c5d4 4983 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4984
14be71f4 4985 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4986 return;
4987 }
4988
87260216 4989 sg = qc->cursg;
1da177e4 4990
45711f1a 4991 page = sg_page(sg);
1da177e4
LT
4992 offset = sg->offset + qc->cursg_ofs;
4993
4994 /* get the current page and offset */
4995 page = nth_page(page, (offset >> PAGE_SHIFT));
4996 offset %= PAGE_SIZE;
4997
6952df03 4998 /* don't overrun current sg */
32529e01 4999 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
5000
5001 /* don't cross page boundaries */
5002 count = min(count, (unsigned int)PAGE_SIZE - offset);
5003
7282aa4b
AL
5004 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5005
91b8b313
AL
5006 if (PageHighMem(page)) {
5007 unsigned long flags;
5008
a6b2c5d4 5009 /* FIXME: use bounce buffer */
91b8b313
AL
5010 local_irq_save(flags);
5011 buf = kmap_atomic(page, KM_IRQ0);
083958d3 5012
91b8b313 5013 /* do the actual data transfer */
a6b2c5d4 5014 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 5015
91b8b313
AL
5016 kunmap_atomic(buf, KM_IRQ0);
5017 local_irq_restore(flags);
5018 } else {
5019 buf = page_address(page);
a6b2c5d4 5020 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 5021 }
1da177e4
LT
5022
5023 bytes -= count;
5024 qc->curbytes += count;
5025 qc->cursg_ofs += count;
5026
32529e01 5027 if (qc->cursg_ofs == sg->length) {
0874ee76
FT
5028 if (qc->cursg == lsg)
5029 no_more_sg = 1;
5030
87260216 5031 qc->cursg = sg_next(qc->cursg);
1da177e4
LT
5032 qc->cursg_ofs = 0;
5033 }
5034
563a6e1f 5035 if (bytes)
1da177e4 5036 goto next_sg;
1da177e4
LT
5037}
5038
6ae4cfb5
AL
5039/**
5040 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5041 * @qc: Command on going
5042 *
5043 * Transfer Transfer data from/to the ATAPI device.
5044 *
5045 * LOCKING:
5046 * Inherited from caller.
6ae4cfb5
AL
5047 */
5048
1da177e4
LT
5049static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5050{
5051 struct ata_port *ap = qc->ap;
5052 struct ata_device *dev = qc->dev;
5053 unsigned int ireason, bc_lo, bc_hi, bytes;
5054 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5055
eec4c3f3
AL
5056 /* Abuse qc->result_tf for temp storage of intermediate TF
5057 * here to save some kernel stack usage.
5058 * For normal completion, qc->result_tf is not relevant. For
5059 * error, qc->result_tf is later overwritten by ata_qc_complete().
5060 * So, the correctness of qc->result_tf is not affected.
5061 */
5062 ap->ops->tf_read(ap, &qc->result_tf);
5063 ireason = qc->result_tf.nsect;
5064 bc_lo = qc->result_tf.lbam;
5065 bc_hi = qc->result_tf.lbah;
1da177e4
LT
5066 bytes = (bc_hi << 8) | bc_lo;
5067
5068 /* shall be cleared to zero, indicating xfer of data */
5069 if (ireason & (1 << 0))
5070 goto err_out;
5071
5072 /* make sure transfer direction matches expected */
5073 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
5074 if (do_write != i_write)
5075 goto err_out;
5076
44877b4e 5077 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 5078
1da177e4 5079 __atapi_pio_bytes(qc, bytes);
4cc980b3 5080 ata_altstatus(ap); /* flush */
1da177e4
LT
5081
5082 return;
5083
5084err_out:
f15a1daf 5085 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 5086 qc->err_mask |= AC_ERR_HSM;
14be71f4 5087 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
5088}
5089
5090/**
c234fb00
AL
5091 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5092 * @ap: the target ata_port
5093 * @qc: qc on going
1da177e4 5094 *
c234fb00
AL
5095 * RETURNS:
5096 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5097 */
c234fb00
AL
5098
5099static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5100{
c234fb00
AL
5101 if (qc->tf.flags & ATA_TFLAG_POLLING)
5102 return 1;
1da177e4 5103
c234fb00
AL
5104 if (ap->hsm_task_state == HSM_ST_FIRST) {
5105 if (qc->tf.protocol == ATA_PROT_PIO &&
5106 (qc->tf.flags & ATA_TFLAG_WRITE))
5107 return 1;
1da177e4 5108
c234fb00
AL
5109 if (is_atapi_taskfile(&qc->tf) &&
5110 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5111 return 1;
fe79e683
AL
5112 }
5113
c234fb00
AL
5114 return 0;
5115}
1da177e4 5116
c17ea20d
TH
5117/**
5118 * ata_hsm_qc_complete - finish a qc running on standard HSM
5119 * @qc: Command to complete
5120 * @in_wq: 1 if called from workqueue, 0 otherwise
5121 *
5122 * Finish @qc which is running on standard HSM.
5123 *
5124 * LOCKING:
cca3974e 5125 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
5126 * Otherwise, none on entry and grabs host lock.
5127 */
5128static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5129{
5130 struct ata_port *ap = qc->ap;
5131 unsigned long flags;
5132
5133 if (ap->ops->error_handler) {
5134 if (in_wq) {
ba6a1308 5135 spin_lock_irqsave(ap->lock, flags);
c17ea20d 5136
cca3974e
JG
5137 /* EH might have kicked in while host lock is
5138 * released.
c17ea20d
TH
5139 */
5140 qc = ata_qc_from_tag(ap, qc->tag);
5141 if (qc) {
5142 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 5143 ap->ops->irq_on(ap);
c17ea20d
TH
5144 ata_qc_complete(qc);
5145 } else
5146 ata_port_freeze(ap);
5147 }
5148
ba6a1308 5149 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
5150 } else {
5151 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5152 ata_qc_complete(qc);
5153 else
5154 ata_port_freeze(ap);
5155 }
5156 } else {
5157 if (in_wq) {
ba6a1308 5158 spin_lock_irqsave(ap->lock, flags);
83625006 5159 ap->ops->irq_on(ap);
c17ea20d 5160 ata_qc_complete(qc);
ba6a1308 5161 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
5162 } else
5163 ata_qc_complete(qc);
5164 }
5165}
5166
bb5cb290
AL
5167/**
5168 * ata_hsm_move - move the HSM to the next state.
5169 * @ap: the target ata_port
5170 * @qc: qc on going
5171 * @status: current device status
5172 * @in_wq: 1 if called from workqueue, 0 otherwise
5173 *
5174 * RETURNS:
5175 * 1 when poll next status needed, 0 otherwise.
5176 */
9a1004d0
TH
5177int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5178 u8 status, int in_wq)
e2cec771 5179{
bb5cb290
AL
5180 unsigned long flags = 0;
5181 int poll_next;
5182
6912ccd5
AL
5183 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5184
bb5cb290
AL
5185 /* Make sure ata_qc_issue_prot() does not throw things
5186 * like DMA polling into the workqueue. Notice that
5187 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5188 */
c234fb00 5189 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 5190
e2cec771 5191fsm_start:
999bb6f4 5192 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 5193 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 5194
e2cec771
AL
5195 switch (ap->hsm_task_state) {
5196 case HSM_ST_FIRST:
bb5cb290
AL
5197 /* Send first data block or PACKET CDB */
5198
5199 /* If polling, we will stay in the work queue after
5200 * sending the data. Otherwise, interrupt handler
5201 * takes over after sending the data.
5202 */
5203 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5204
e2cec771 5205 /* check device status */
3655d1d3
AL
5206 if (unlikely((status & ATA_DRQ) == 0)) {
5207 /* handle BSY=0, DRQ=0 as error */
5208 if (likely(status & (ATA_ERR | ATA_DF)))
5209 /* device stops HSM for abort/error */
5210 qc->err_mask |= AC_ERR_DEV;
5211 else
5212 /* HSM violation. Let EH handle this */
5213 qc->err_mask |= AC_ERR_HSM;
5214
14be71f4 5215 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 5216 goto fsm_start;
1da177e4
LT
5217 }
5218
71601958
AL
5219 /* Device should not ask for data transfer (DRQ=1)
5220 * when it finds something wrong.
eee6c32f
AL
5221 * We ignore DRQ here and stop the HSM by
5222 * changing hsm_task_state to HSM_ST_ERR and
5223 * let the EH abort the command or reset the device.
71601958
AL
5224 */
5225 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
5226 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
5227 "error, dev_stat 0x%X\n", status);
3655d1d3 5228 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
5229 ap->hsm_task_state = HSM_ST_ERR;
5230 goto fsm_start;
71601958 5231 }
1da177e4 5232
bb5cb290
AL
5233 /* Send the CDB (atapi) or the first data block (ata pio out).
5234 * During the state transition, interrupt handler shouldn't
5235 * be invoked before the data transfer is complete and
5236 * hsm_task_state is changed. Hence, the following locking.
5237 */
5238 if (in_wq)
ba6a1308 5239 spin_lock_irqsave(ap->lock, flags);
1da177e4 5240
bb5cb290
AL
5241 if (qc->tf.protocol == ATA_PROT_PIO) {
5242 /* PIO data out protocol.
5243 * send first data block.
5244 */
0565c26d 5245
bb5cb290
AL
5246 /* ata_pio_sectors() might change the state
5247 * to HSM_ST_LAST. so, the state is changed here
5248 * before ata_pio_sectors().
5249 */
5250 ap->hsm_task_state = HSM_ST;
5251 ata_pio_sectors(qc);
bb5cb290
AL
5252 } else
5253 /* send CDB */
5254 atapi_send_cdb(ap, qc);
5255
5256 if (in_wq)
ba6a1308 5257 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
5258
5259 /* if polling, ata_pio_task() handles the rest.
5260 * otherwise, interrupt handler takes over from here.
5261 */
e2cec771 5262 break;
1c848984 5263
e2cec771
AL
5264 case HSM_ST:
5265 /* complete command or read/write the data register */
5266 if (qc->tf.protocol == ATA_PROT_ATAPI) {
5267 /* ATAPI PIO protocol */
5268 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
5269 /* No more data to transfer or device error.
5270 * Device error will be tagged in HSM_ST_LAST.
5271 */
e2cec771
AL
5272 ap->hsm_task_state = HSM_ST_LAST;
5273 goto fsm_start;
5274 }
1da177e4 5275
71601958
AL
5276 /* Device should not ask for data transfer (DRQ=1)
5277 * when it finds something wrong.
eee6c32f
AL
5278 * We ignore DRQ here and stop the HSM by
5279 * changing hsm_task_state to HSM_ST_ERR and
5280 * let the EH abort the command or reset the device.
71601958
AL
5281 */
5282 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
5283 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5284 "device error, dev_stat 0x%X\n",
5285 status);
3655d1d3 5286 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
5287 ap->hsm_task_state = HSM_ST_ERR;
5288 goto fsm_start;
71601958 5289 }
1da177e4 5290
e2cec771 5291 atapi_pio_bytes(qc);
7fb6ec28 5292
e2cec771
AL
5293 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5294 /* bad ireason reported by device */
5295 goto fsm_start;
1da177e4 5296
e2cec771
AL
5297 } else {
5298 /* ATA PIO protocol */
5299 if (unlikely((status & ATA_DRQ) == 0)) {
5300 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
5301 if (likely(status & (ATA_ERR | ATA_DF)))
5302 /* device stops HSM for abort/error */
5303 qc->err_mask |= AC_ERR_DEV;
5304 else
55a8e2c8
TH
5305 /* HSM violation. Let EH handle this.
5306 * Phantom devices also trigger this
5307 * condition. Mark hint.
5308 */
5309 qc->err_mask |= AC_ERR_HSM |
5310 AC_ERR_NODEV_HINT;
3655d1d3 5311
e2cec771
AL
5312 ap->hsm_task_state = HSM_ST_ERR;
5313 goto fsm_start;
5314 }
1da177e4 5315
eee6c32f
AL
5316 /* For PIO reads, some devices may ask for
5317 * data transfer (DRQ=1) alone with ERR=1.
5318 * We respect DRQ here and transfer one
5319 * block of junk data before changing the
5320 * hsm_task_state to HSM_ST_ERR.
5321 *
5322 * For PIO writes, ERR=1 DRQ=1 doesn't make
5323 * sense since the data block has been
5324 * transferred to the device.
71601958
AL
5325 */
5326 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
5327 /* data might be corrputed */
5328 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
5329
5330 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5331 ata_pio_sectors(qc);
eee6c32f
AL
5332 status = ata_wait_idle(ap);
5333 }
5334
3655d1d3
AL
5335 if (status & (ATA_BUSY | ATA_DRQ))
5336 qc->err_mask |= AC_ERR_HSM;
5337
eee6c32f
AL
5338 /* ata_pio_sectors() might change the
5339 * state to HSM_ST_LAST. so, the state
5340 * is changed after ata_pio_sectors().
5341 */
5342 ap->hsm_task_state = HSM_ST_ERR;
5343 goto fsm_start;
71601958
AL
5344 }
5345
e2cec771
AL
5346 ata_pio_sectors(qc);
5347
5348 if (ap->hsm_task_state == HSM_ST_LAST &&
5349 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5350 /* all data read */
52a32205 5351 status = ata_wait_idle(ap);
e2cec771
AL
5352 goto fsm_start;
5353 }
5354 }
5355
bb5cb290 5356 poll_next = 1;
1da177e4
LT
5357 break;
5358
14be71f4 5359 case HSM_ST_LAST:
6912ccd5
AL
5360 if (unlikely(!ata_ok(status))) {
5361 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
5362 ap->hsm_task_state = HSM_ST_ERR;
5363 goto fsm_start;
5364 }
5365
5366 /* no more data to transfer */
4332a771 5367 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 5368 ap->print_id, qc->dev->devno, status);
e2cec771 5369
6912ccd5
AL
5370 WARN_ON(qc->err_mask);
5371
e2cec771 5372 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 5373
e2cec771 5374 /* complete taskfile transaction */
c17ea20d 5375 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5376
5377 poll_next = 0;
1da177e4
LT
5378 break;
5379
14be71f4 5380 case HSM_ST_ERR:
e2cec771
AL
5381 /* make sure qc->err_mask is available to
5382 * know what's wrong and recover
5383 */
5384 WARN_ON(qc->err_mask == 0);
5385
5386 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 5387
999bb6f4 5388 /* complete taskfile transaction */
c17ea20d 5389 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5390
5391 poll_next = 0;
e2cec771
AL
5392 break;
5393 default:
bb5cb290 5394 poll_next = 0;
6912ccd5 5395 BUG();
1da177e4
LT
5396 }
5397
bb5cb290 5398 return poll_next;
1da177e4
LT
5399}
5400
65f27f38 5401static void ata_pio_task(struct work_struct *work)
8061f5f0 5402{
65f27f38
DH
5403 struct ata_port *ap =
5404 container_of(work, struct ata_port, port_task.work);
5405 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5406 u8 status;
a1af3734 5407 int poll_next;
8061f5f0 5408
7fb6ec28 5409fsm_start:
a1af3734 5410 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5411
a1af3734
AL
5412 /*
5413 * This is purely heuristic. This is a fast path.
5414 * Sometimes when we enter, BSY will be cleared in
5415 * a chk-status or two. If not, the drive is probably seeking
5416 * or something. Snooze for a couple msecs, then
5417 * chk-status again. If still busy, queue delayed work.
5418 */
5419 status = ata_busy_wait(ap, ATA_BUSY, 5);
5420 if (status & ATA_BUSY) {
5421 msleep(2);
5422 status = ata_busy_wait(ap, ATA_BUSY, 10);
5423 if (status & ATA_BUSY) {
31ce6dae 5424 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5425 return;
5426 }
8061f5f0
TH
5427 }
5428
a1af3734
AL
5429 /* move the HSM */
5430 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5431
a1af3734
AL
5432 /* another command or interrupt handler
5433 * may be running at this point.
5434 */
5435 if (poll_next)
7fb6ec28 5436 goto fsm_start;
8061f5f0
TH
5437}
5438
1da177e4
LT
5439/**
5440 * ata_qc_new - Request an available ATA command, for queueing
5441 * @ap: Port associated with device @dev
5442 * @dev: Device from whom we request an available command structure
5443 *
5444 * LOCKING:
0cba632b 5445 * None.
1da177e4
LT
5446 */
5447
5448static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5449{
5450 struct ata_queued_cmd *qc = NULL;
5451 unsigned int i;
5452
e3180499 5453 /* no command while frozen */
b51e9e5d 5454 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5455 return NULL;
5456
2ab7db1f
TH
5457 /* the last tag is reserved for internal command. */
5458 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5459 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5460 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5461 break;
5462 }
5463
5464 if (qc)
5465 qc->tag = i;
5466
5467 return qc;
5468}
5469
5470/**
5471 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5472 * @dev: Device from whom we request an available command structure
5473 *
5474 * LOCKING:
0cba632b 5475 * None.
1da177e4
LT
5476 */
5477
3373efd8 5478struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5479{
9af5c9c9 5480 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5481 struct ata_queued_cmd *qc;
5482
5483 qc = ata_qc_new(ap);
5484 if (qc) {
1da177e4
LT
5485 qc->scsicmd = NULL;
5486 qc->ap = ap;
5487 qc->dev = dev;
1da177e4 5488
2c13b7ce 5489 ata_qc_reinit(qc);
1da177e4
LT
5490 }
5491
5492 return qc;
5493}
5494
1da177e4
LT
5495/**
5496 * ata_qc_free - free unused ata_queued_cmd
5497 * @qc: Command to complete
5498 *
5499 * Designed to free unused ata_queued_cmd object
5500 * in case something prevents using it.
5501 *
5502 * LOCKING:
cca3974e 5503 * spin_lock_irqsave(host lock)
1da177e4
LT
5504 */
5505void ata_qc_free(struct ata_queued_cmd *qc)
5506{
4ba946e9
TH
5507 struct ata_port *ap = qc->ap;
5508 unsigned int tag;
5509
a4631474 5510 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5511
4ba946e9
TH
5512 qc->flags = 0;
5513 tag = qc->tag;
5514 if (likely(ata_tag_valid(tag))) {
4ba946e9 5515 qc->tag = ATA_TAG_POISON;
6cec4a39 5516 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5517 }
1da177e4
LT
5518}
5519
76014427 5520void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5521{
dedaf2b0 5522 struct ata_port *ap = qc->ap;
9af5c9c9 5523 struct ata_link *link = qc->dev->link;
dedaf2b0 5524
a4631474
TH
5525 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5526 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5527
5528 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5529 ata_sg_clean(qc);
5530
7401abf2 5531 /* command should be marked inactive atomically with qc completion */
da917d69 5532 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 5533 link->sactive &= ~(1 << qc->tag);
da917d69
TH
5534 if (!link->sactive)
5535 ap->nr_active_links--;
5536 } else {
9af5c9c9 5537 link->active_tag = ATA_TAG_POISON;
da917d69
TH
5538 ap->nr_active_links--;
5539 }
5540
5541 /* clear exclusive status */
5542 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5543 ap->excl_link == link))
5544 ap->excl_link = NULL;
7401abf2 5545
3f3791d3
AL
5546 /* atapi: mark qc as inactive to prevent the interrupt handler
5547 * from completing the command twice later, before the error handler
5548 * is called. (when rc != 0 and atapi request sense is needed)
5549 */
5550 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5551 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5552
1da177e4 5553 /* call completion callback */
77853bf2 5554 qc->complete_fn(qc);
1da177e4
LT
5555}
5556
39599a53
TH
5557static void fill_result_tf(struct ata_queued_cmd *qc)
5558{
5559 struct ata_port *ap = qc->ap;
5560
39599a53 5561 qc->result_tf.flags = qc->tf.flags;
4742d54f 5562 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5563}
5564
f686bcb8
TH
5565/**
5566 * ata_qc_complete - Complete an active ATA command
5567 * @qc: Command to complete
5568 * @err_mask: ATA Status register contents
5569 *
5570 * Indicate to the mid and upper layers that an ATA
5571 * command has completed, with either an ok or not-ok status.
5572 *
5573 * LOCKING:
cca3974e 5574 * spin_lock_irqsave(host lock)
f686bcb8
TH
5575 */
5576void ata_qc_complete(struct ata_queued_cmd *qc)
5577{
5578 struct ata_port *ap = qc->ap;
5579
5580 /* XXX: New EH and old EH use different mechanisms to
5581 * synchronize EH with regular execution path.
5582 *
5583 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5584 * Normal execution path is responsible for not accessing a
5585 * failed qc. libata core enforces the rule by returning NULL
5586 * from ata_qc_from_tag() for failed qcs.
5587 *
5588 * Old EH depends on ata_qc_complete() nullifying completion
5589 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5590 * not synchronize with interrupt handler. Only PIO task is
5591 * taken care of.
5592 */
5593 if (ap->ops->error_handler) {
b51e9e5d 5594 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5595
5596 if (unlikely(qc->err_mask))
5597 qc->flags |= ATA_QCFLAG_FAILED;
5598
5599 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5600 if (!ata_tag_internal(qc->tag)) {
5601 /* always fill result TF for failed qc */
39599a53 5602 fill_result_tf(qc);
f686bcb8
TH
5603 ata_qc_schedule_eh(qc);
5604 return;
5605 }
5606 }
5607
5608 /* read result TF if requested */
5609 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5610 fill_result_tf(qc);
f686bcb8
TH
5611
5612 __ata_qc_complete(qc);
5613 } else {
5614 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5615 return;
5616
5617 /* read result TF if failed or requested */
5618 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5619 fill_result_tf(qc);
f686bcb8
TH
5620
5621 __ata_qc_complete(qc);
5622 }
5623}
5624
dedaf2b0
TH
5625/**
5626 * ata_qc_complete_multiple - Complete multiple qcs successfully
5627 * @ap: port in question
5628 * @qc_active: new qc_active mask
5629 * @finish_qc: LLDD callback invoked before completing a qc
5630 *
5631 * Complete in-flight commands. This functions is meant to be
5632 * called from low-level driver's interrupt routine to complete
5633 * requests normally. ap->qc_active and @qc_active is compared
5634 * and commands are completed accordingly.
5635 *
5636 * LOCKING:
cca3974e 5637 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5638 *
5639 * RETURNS:
5640 * Number of completed commands on success, -errno otherwise.
5641 */
5642int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5643 void (*finish_qc)(struct ata_queued_cmd *))
5644{
5645 int nr_done = 0;
5646 u32 done_mask;
5647 int i;
5648
5649 done_mask = ap->qc_active ^ qc_active;
5650
5651 if (unlikely(done_mask & qc_active)) {
5652 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5653 "(%08x->%08x)\n", ap->qc_active, qc_active);
5654 return -EINVAL;
5655 }
5656
5657 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5658 struct ata_queued_cmd *qc;
5659
5660 if (!(done_mask & (1 << i)))
5661 continue;
5662
5663 if ((qc = ata_qc_from_tag(ap, i))) {
5664 if (finish_qc)
5665 finish_qc(qc);
5666 ata_qc_complete(qc);
5667 nr_done++;
5668 }
5669 }
5670
5671 return nr_done;
5672}
5673
1da177e4
LT
5674static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5675{
5676 struct ata_port *ap = qc->ap;
5677
5678 switch (qc->tf.protocol) {
3dc1d881 5679 case ATA_PROT_NCQ:
1da177e4
LT
5680 case ATA_PROT_DMA:
5681 case ATA_PROT_ATAPI_DMA:
5682 return 1;
5683
5684 case ATA_PROT_ATAPI:
5685 case ATA_PROT_PIO:
1da177e4
LT
5686 if (ap->flags & ATA_FLAG_PIO_DMA)
5687 return 1;
5688
5689 /* fall through */
5690
5691 default:
5692 return 0;
5693 }
5694
5695 /* never reached */
5696}
5697
5698/**
5699 * ata_qc_issue - issue taskfile to device
5700 * @qc: command to issue to device
5701 *
5702 * Prepare an ATA command to submission to device.
5703 * This includes mapping the data into a DMA-able
5704 * area, filling in the S/G table, and finally
5705 * writing the taskfile to hardware, starting the command.
5706 *
5707 * LOCKING:
cca3974e 5708 * spin_lock_irqsave(host lock)
1da177e4 5709 */
8e0e694a 5710void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5711{
5712 struct ata_port *ap = qc->ap;
9af5c9c9 5713 struct ata_link *link = qc->dev->link;
1da177e4 5714
dedaf2b0
TH
5715 /* Make sure only one non-NCQ command is outstanding. The
5716 * check is skipped for old EH because it reuses active qc to
5717 * request ATAPI sense.
5718 */
9af5c9c9 5719 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0
TH
5720
5721 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 5722 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
5723
5724 if (!link->sactive)
5725 ap->nr_active_links++;
9af5c9c9 5726 link->sactive |= 1 << qc->tag;
dedaf2b0 5727 } else {
9af5c9c9 5728 WARN_ON(link->sactive);
da917d69
TH
5729
5730 ap->nr_active_links++;
9af5c9c9 5731 link->active_tag = qc->tag;
dedaf2b0
TH
5732 }
5733
e4a70e76 5734 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5735 ap->qc_active |= 1 << qc->tag;
e4a70e76 5736
1da177e4
LT
5737 if (ata_should_dma_map(qc)) {
5738 if (qc->flags & ATA_QCFLAG_SG) {
5739 if (ata_sg_setup(qc))
8e436af9 5740 goto sg_err;
1da177e4
LT
5741 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5742 if (ata_sg_setup_one(qc))
8e436af9 5743 goto sg_err;
1da177e4
LT
5744 }
5745 } else {
5746 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5747 }
5748
5749 ap->ops->qc_prep(qc);
5750
8e0e694a
TH
5751 qc->err_mask |= ap->ops->qc_issue(qc);
5752 if (unlikely(qc->err_mask))
5753 goto err;
5754 return;
1da177e4 5755
8e436af9
TH
5756sg_err:
5757 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5758 qc->err_mask |= AC_ERR_SYSTEM;
5759err:
5760 ata_qc_complete(qc);
1da177e4
LT
5761}
5762
5763/**
5764 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5765 * @qc: command to issue to device
5766 *
5767 * Using various libata functions and hooks, this function
5768 * starts an ATA command. ATA commands are grouped into
5769 * classes called "protocols", and issuing each type of protocol
5770 * is slightly different.
5771 *
0baab86b
EF
5772 * May be used as the qc_issue() entry in ata_port_operations.
5773 *
1da177e4 5774 * LOCKING:
cca3974e 5775 * spin_lock_irqsave(host lock)
1da177e4
LT
5776 *
5777 * RETURNS:
9a3d9eb0 5778 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5779 */
5780
9a3d9eb0 5781unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5782{
5783 struct ata_port *ap = qc->ap;
5784
e50362ec
AL
5785 /* Use polling pio if the LLD doesn't handle
5786 * interrupt driven pio and atapi CDB interrupt.
5787 */
5788 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5789 switch (qc->tf.protocol) {
5790 case ATA_PROT_PIO:
e3472cbe 5791 case ATA_PROT_NODATA:
e50362ec
AL
5792 case ATA_PROT_ATAPI:
5793 case ATA_PROT_ATAPI_NODATA:
5794 qc->tf.flags |= ATA_TFLAG_POLLING;
5795 break;
5796 case ATA_PROT_ATAPI_DMA:
5797 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5798 /* see ata_dma_blacklisted() */
e50362ec
AL
5799 BUG();
5800 break;
5801 default:
5802 break;
5803 }
5804 }
5805
312f7da2 5806 /* select the device */
1da177e4
LT
5807 ata_dev_select(ap, qc->dev->devno, 1, 0);
5808
312f7da2 5809 /* start the command */
1da177e4
LT
5810 switch (qc->tf.protocol) {
5811 case ATA_PROT_NODATA:
312f7da2
AL
5812 if (qc->tf.flags & ATA_TFLAG_POLLING)
5813 ata_qc_set_polling(qc);
5814
e5338254 5815 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5816 ap->hsm_task_state = HSM_ST_LAST;
5817
5818 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5819 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5820
1da177e4
LT
5821 break;
5822
5823 case ATA_PROT_DMA:
587005de 5824 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5825
1da177e4
LT
5826 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5827 ap->ops->bmdma_setup(qc); /* set up bmdma */
5828 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5829 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5830 break;
5831
312f7da2
AL
5832 case ATA_PROT_PIO:
5833 if (qc->tf.flags & ATA_TFLAG_POLLING)
5834 ata_qc_set_polling(qc);
1da177e4 5835
e5338254 5836 ata_tf_to_host(ap, &qc->tf);
312f7da2 5837
54f00389
AL
5838 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5839 /* PIO data out protocol */
5840 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5841 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5842
5843 /* always send first data block using
e27486db 5844 * the ata_pio_task() codepath.
54f00389 5845 */
312f7da2 5846 } else {
54f00389
AL
5847 /* PIO data in protocol */
5848 ap->hsm_task_state = HSM_ST;
5849
5850 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5851 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5852
5853 /* if polling, ata_pio_task() handles the rest.
5854 * otherwise, interrupt handler takes over from here.
5855 */
312f7da2
AL
5856 }
5857
1da177e4
LT
5858 break;
5859
1da177e4 5860 case ATA_PROT_ATAPI:
1da177e4 5861 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5862 if (qc->tf.flags & ATA_TFLAG_POLLING)
5863 ata_qc_set_polling(qc);
5864
e5338254 5865 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5866
312f7da2
AL
5867 ap->hsm_task_state = HSM_ST_FIRST;
5868
5869 /* send cdb by polling if no cdb interrupt */
5870 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5871 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5872 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5873 break;
5874
5875 case ATA_PROT_ATAPI_DMA:
587005de 5876 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5877
1da177e4
LT
5878 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5879 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5880 ap->hsm_task_state = HSM_ST_FIRST;
5881
5882 /* send cdb by polling if no cdb interrupt */
5883 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5884 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5885 break;
5886
5887 default:
5888 WARN_ON(1);
9a3d9eb0 5889 return AC_ERR_SYSTEM;
1da177e4
LT
5890 }
5891
5892 return 0;
5893}
5894
1da177e4
LT
5895/**
5896 * ata_host_intr - Handle host interrupt for given (port, task)
5897 * @ap: Port on which interrupt arrived (possibly...)
5898 * @qc: Taskfile currently active in engine
5899 *
5900 * Handle host interrupt for given queued command. Currently,
5901 * only DMA interrupts are handled. All other commands are
5902 * handled via polling with interrupts disabled (nIEN bit).
5903 *
5904 * LOCKING:
cca3974e 5905 * spin_lock_irqsave(host lock)
1da177e4
LT
5906 *
5907 * RETURNS:
5908 * One if interrupt was handled, zero if not (shared irq).
5909 */
5910
5911inline unsigned int ata_host_intr (struct ata_port *ap,
5912 struct ata_queued_cmd *qc)
5913{
9af5c9c9 5914 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 5915 u8 status, host_stat = 0;
1da177e4 5916
312f7da2 5917 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5918 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5919
312f7da2
AL
5920 /* Check whether we are expecting interrupt in this state */
5921 switch (ap->hsm_task_state) {
5922 case HSM_ST_FIRST:
6912ccd5
AL
5923 /* Some pre-ATAPI-4 devices assert INTRQ
5924 * at this state when ready to receive CDB.
5925 */
1da177e4 5926
312f7da2
AL
5927 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5928 * The flag was turned on only for atapi devices.
5929 * No need to check is_atapi_taskfile(&qc->tf) again.
5930 */
5931 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5932 goto idle_irq;
1da177e4 5933 break;
312f7da2
AL
5934 case HSM_ST_LAST:
5935 if (qc->tf.protocol == ATA_PROT_DMA ||
5936 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5937 /* check status of DMA engine */
5938 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
5939 VPRINTK("ata%u: host_stat 0x%X\n",
5940 ap->print_id, host_stat);
312f7da2
AL
5941
5942 /* if it's not our irq... */
5943 if (!(host_stat & ATA_DMA_INTR))
5944 goto idle_irq;
5945
5946 /* before we do anything else, clear DMA-Start bit */
5947 ap->ops->bmdma_stop(qc);
a4f16610
AL
5948
5949 if (unlikely(host_stat & ATA_DMA_ERR)) {
5950 /* error when transfering data to/from memory */
5951 qc->err_mask |= AC_ERR_HOST_BUS;
5952 ap->hsm_task_state = HSM_ST_ERR;
5953 }
312f7da2
AL
5954 }
5955 break;
5956 case HSM_ST:
5957 break;
1da177e4
LT
5958 default:
5959 goto idle_irq;
5960 }
5961
312f7da2
AL
5962 /* check altstatus */
5963 status = ata_altstatus(ap);
5964 if (status & ATA_BUSY)
5965 goto idle_irq;
1da177e4 5966
312f7da2
AL
5967 /* check main status, clearing INTRQ */
5968 status = ata_chk_status(ap);
5969 if (unlikely(status & ATA_BUSY))
5970 goto idle_irq;
1da177e4 5971
312f7da2
AL
5972 /* ack bmdma irq events */
5973 ap->ops->irq_clear(ap);
1da177e4 5974
bb5cb290 5975 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5976
5977 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5978 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5979 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5980
1da177e4
LT
5981 return 1; /* irq handled */
5982
5983idle_irq:
5984 ap->stats.idle_irq++;
5985
5986#ifdef ATA_IRQ_TRAP
5987 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
5988 ata_chk_status(ap);
5989 ap->ops->irq_clear(ap);
f15a1daf 5990 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5991 return 1;
1da177e4
LT
5992 }
5993#endif
5994 return 0; /* irq not handled */
5995}
5996
5997/**
5998 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5999 * @irq: irq line (unused)
cca3974e 6000 * @dev_instance: pointer to our ata_host information structure
1da177e4 6001 *
0cba632b
JG
6002 * Default interrupt handler for PCI IDE devices. Calls
6003 * ata_host_intr() for each port that is not disabled.
6004 *
1da177e4 6005 * LOCKING:
cca3974e 6006 * Obtains host lock during operation.
1da177e4
LT
6007 *
6008 * RETURNS:
0cba632b 6009 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6010 */
6011
7d12e780 6012irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 6013{
cca3974e 6014 struct ata_host *host = dev_instance;
1da177e4
LT
6015 unsigned int i;
6016 unsigned int handled = 0;
6017 unsigned long flags;
6018
6019 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6020 spin_lock_irqsave(&host->lock, flags);
1da177e4 6021
cca3974e 6022 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6023 struct ata_port *ap;
6024
cca3974e 6025 ap = host->ports[i];
c1389503 6026 if (ap &&
029f5468 6027 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6028 struct ata_queued_cmd *qc;
6029
9af5c9c9 6030 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6031 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6032 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6033 handled |= ata_host_intr(ap, qc);
6034 }
6035 }
6036
cca3974e 6037 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6038
6039 return IRQ_RETVAL(handled);
6040}
6041
34bf2170
TH
6042/**
6043 * sata_scr_valid - test whether SCRs are accessible
936fd732 6044 * @link: ATA link to test SCR accessibility for
34bf2170 6045 *
936fd732 6046 * Test whether SCRs are accessible for @link.
34bf2170
TH
6047 *
6048 * LOCKING:
6049 * None.
6050 *
6051 * RETURNS:
6052 * 1 if SCRs are accessible, 0 otherwise.
6053 */
936fd732 6054int sata_scr_valid(struct ata_link *link)
34bf2170 6055{
936fd732
TH
6056 struct ata_port *ap = link->ap;
6057
a16abc0b 6058 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6059}
6060
6061/**
6062 * sata_scr_read - read SCR register of the specified port
936fd732 6063 * @link: ATA link to read SCR for
34bf2170
TH
6064 * @reg: SCR to read
6065 * @val: Place to store read value
6066 *
936fd732 6067 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6068 * guaranteed to succeed if @link is ap->link, the cable type of
6069 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6070 *
6071 * LOCKING:
633273a3 6072 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6073 *
6074 * RETURNS:
6075 * 0 on success, negative errno on failure.
6076 */
936fd732 6077int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6078{
633273a3
TH
6079 if (ata_is_host_link(link)) {
6080 struct ata_port *ap = link->ap;
936fd732 6081
633273a3
TH
6082 if (sata_scr_valid(link))
6083 return ap->ops->scr_read(ap, reg, val);
6084 return -EOPNOTSUPP;
6085 }
6086
6087 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6088}
6089
6090/**
6091 * sata_scr_write - write SCR register of the specified port
936fd732 6092 * @link: ATA link to write SCR for
34bf2170
TH
6093 * @reg: SCR to write
6094 * @val: value to write
6095 *
936fd732 6096 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6097 * guaranteed to succeed if @link is ap->link, the cable type of
6098 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6099 *
6100 * LOCKING:
633273a3 6101 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6102 *
6103 * RETURNS:
6104 * 0 on success, negative errno on failure.
6105 */
936fd732 6106int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6107{
633273a3
TH
6108 if (ata_is_host_link(link)) {
6109 struct ata_port *ap = link->ap;
6110
6111 if (sata_scr_valid(link))
6112 return ap->ops->scr_write(ap, reg, val);
6113 return -EOPNOTSUPP;
6114 }
936fd732 6115
633273a3 6116 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6117}
6118
6119/**
6120 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6121 * @link: ATA link to write SCR for
34bf2170
TH
6122 * @reg: SCR to write
6123 * @val: value to write
6124 *
6125 * This function is identical to sata_scr_write() except that this
6126 * function performs flush after writing to the register.
6127 *
6128 * LOCKING:
633273a3 6129 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6130 *
6131 * RETURNS:
6132 * 0 on success, negative errno on failure.
6133 */
936fd732 6134int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6135{
633273a3
TH
6136 if (ata_is_host_link(link)) {
6137 struct ata_port *ap = link->ap;
6138 int rc;
da3dbb17 6139
633273a3
TH
6140 if (sata_scr_valid(link)) {
6141 rc = ap->ops->scr_write(ap, reg, val);
6142 if (rc == 0)
6143 rc = ap->ops->scr_read(ap, reg, &val);
6144 return rc;
6145 }
6146 return -EOPNOTSUPP;
34bf2170 6147 }
633273a3
TH
6148
6149 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6150}
6151
6152/**
936fd732
TH
6153 * ata_link_online - test whether the given link is online
6154 * @link: ATA link to test
34bf2170 6155 *
936fd732
TH
6156 * Test whether @link is online. Note that this function returns
6157 * 0 if online status of @link cannot be obtained, so
6158 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6159 *
6160 * LOCKING:
6161 * None.
6162 *
6163 * RETURNS:
6164 * 1 if the port online status is available and online.
6165 */
936fd732 6166int ata_link_online(struct ata_link *link)
34bf2170
TH
6167{
6168 u32 sstatus;
6169
936fd732
TH
6170 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6171 (sstatus & 0xf) == 0x3)
34bf2170
TH
6172 return 1;
6173 return 0;
6174}
6175
6176/**
936fd732
TH
6177 * ata_link_offline - test whether the given link is offline
6178 * @link: ATA link to test
34bf2170 6179 *
936fd732
TH
6180 * Test whether @link is offline. Note that this function
6181 * returns 0 if offline status of @link cannot be obtained, so
6182 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6183 *
6184 * LOCKING:
6185 * None.
6186 *
6187 * RETURNS:
6188 * 1 if the port offline status is available and offline.
6189 */
936fd732 6190int ata_link_offline(struct ata_link *link)
34bf2170
TH
6191{
6192 u32 sstatus;
6193
936fd732
TH
6194 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6195 (sstatus & 0xf) != 0x3)
34bf2170
TH
6196 return 1;
6197 return 0;
6198}
0baab86b 6199
77b08fb5 6200int ata_flush_cache(struct ata_device *dev)
9b847548 6201{
977e6b9f 6202 unsigned int err_mask;
9b847548
JA
6203 u8 cmd;
6204
6205 if (!ata_try_flush_cache(dev))
6206 return 0;
6207
6fc49adb 6208 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6209 cmd = ATA_CMD_FLUSH_EXT;
6210 else
6211 cmd = ATA_CMD_FLUSH;
6212
4f34337b
AC
6213 /* This is wrong. On a failed flush we get back the LBA of the lost
6214 sector and we should (assuming it wasn't aborted as unknown) issue
6215 a further flush command to continue the writeback until it
6216 does not error */
977e6b9f
TH
6217 err_mask = ata_do_simple_cmd(dev, cmd);
6218 if (err_mask) {
6219 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6220 return -EIO;
6221 }
6222
6223 return 0;
9b847548
JA
6224}
6225
6ffa01d8 6226#ifdef CONFIG_PM
cca3974e
JG
6227static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6228 unsigned int action, unsigned int ehi_flags,
6229 int wait)
500530f6
TH
6230{
6231 unsigned long flags;
6232 int i, rc;
6233
cca3974e
JG
6234 for (i = 0; i < host->n_ports; i++) {
6235 struct ata_port *ap = host->ports[i];
e3667ebf 6236 struct ata_link *link;
500530f6
TH
6237
6238 /* Previous resume operation might still be in
6239 * progress. Wait for PM_PENDING to clear.
6240 */
6241 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6242 ata_port_wait_eh(ap);
6243 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6244 }
6245
6246 /* request PM ops to EH */
6247 spin_lock_irqsave(ap->lock, flags);
6248
6249 ap->pm_mesg = mesg;
6250 if (wait) {
6251 rc = 0;
6252 ap->pm_result = &rc;
6253 }
6254
6255 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6256 __ata_port_for_each_link(link, ap) {
6257 link->eh_info.action |= action;
6258 link->eh_info.flags |= ehi_flags;
6259 }
500530f6
TH
6260
6261 ata_port_schedule_eh(ap);
6262
6263 spin_unlock_irqrestore(ap->lock, flags);
6264
6265 /* wait and check result */
6266 if (wait) {
6267 ata_port_wait_eh(ap);
6268 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6269 if (rc)
6270 return rc;
6271 }
6272 }
6273
6274 return 0;
6275}
6276
6277/**
cca3974e
JG
6278 * ata_host_suspend - suspend host
6279 * @host: host to suspend
500530f6
TH
6280 * @mesg: PM message
6281 *
cca3974e 6282 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6283 * function requests EH to perform PM operations and waits for EH
6284 * to finish.
6285 *
6286 * LOCKING:
6287 * Kernel thread context (may sleep).
6288 *
6289 * RETURNS:
6290 * 0 on success, -errno on failure.
6291 */
cca3974e 6292int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6293{
9666f400 6294 int rc;
500530f6 6295
cca3974e 6296 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6297 if (rc == 0)
6298 host->dev->power.power_state = mesg;
500530f6
TH
6299 return rc;
6300}
6301
6302/**
cca3974e
JG
6303 * ata_host_resume - resume host
6304 * @host: host to resume
500530f6 6305 *
cca3974e 6306 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6307 * function requests EH to perform PM operations and returns.
6308 * Note that all resume operations are performed parallely.
6309 *
6310 * LOCKING:
6311 * Kernel thread context (may sleep).
6312 */
cca3974e 6313void ata_host_resume(struct ata_host *host)
500530f6 6314{
cca3974e
JG
6315 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6316 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6317 host->dev->power.power_state = PMSG_ON;
500530f6 6318}
6ffa01d8 6319#endif
500530f6 6320
c893a3ae
RD
6321/**
6322 * ata_port_start - Set port up for dma.
6323 * @ap: Port to initialize
6324 *
6325 * Called just after data structures for each port are
6326 * initialized. Allocates space for PRD table.
6327 *
6328 * May be used as the port_start() entry in ata_port_operations.
6329 *
6330 * LOCKING:
6331 * Inherited from caller.
6332 */
f0d36efd 6333int ata_port_start(struct ata_port *ap)
1da177e4 6334{
2f1f610b 6335 struct device *dev = ap->dev;
6037d6bb 6336 int rc;
1da177e4 6337
f0d36efd
TH
6338 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6339 GFP_KERNEL);
1da177e4
LT
6340 if (!ap->prd)
6341 return -ENOMEM;
6342
6037d6bb 6343 rc = ata_pad_alloc(ap, dev);
f0d36efd 6344 if (rc)
6037d6bb 6345 return rc;
1da177e4 6346
f0d36efd
TH
6347 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6348 (unsigned long long)ap->prd_dma);
1da177e4
LT
6349 return 0;
6350}
6351
3ef3b43d
TH
6352/**
6353 * ata_dev_init - Initialize an ata_device structure
6354 * @dev: Device structure to initialize
6355 *
6356 * Initialize @dev in preparation for probing.
6357 *
6358 * LOCKING:
6359 * Inherited from caller.
6360 */
6361void ata_dev_init(struct ata_device *dev)
6362{
9af5c9c9
TH
6363 struct ata_link *link = dev->link;
6364 struct ata_port *ap = link->ap;
72fa4b74
TH
6365 unsigned long flags;
6366
5a04bf4b 6367 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6368 link->sata_spd_limit = link->hw_sata_spd_limit;
6369 link->sata_spd = 0;
5a04bf4b 6370
72fa4b74
TH
6371 /* High bits of dev->flags are used to record warm plug
6372 * requests which occur asynchronously. Synchronize using
cca3974e 6373 * host lock.
72fa4b74 6374 */
ba6a1308 6375 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6376 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6377 dev->horkage = 0;
ba6a1308 6378 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6379
72fa4b74
TH
6380 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6381 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6382 dev->pio_mask = UINT_MAX;
6383 dev->mwdma_mask = UINT_MAX;
6384 dev->udma_mask = UINT_MAX;
6385}
6386
4fb37a25
TH
6387/**
6388 * ata_link_init - Initialize an ata_link structure
6389 * @ap: ATA port link is attached to
6390 * @link: Link structure to initialize
8989805d 6391 * @pmp: Port multiplier port number
4fb37a25
TH
6392 *
6393 * Initialize @link.
6394 *
6395 * LOCKING:
6396 * Kernel thread context (may sleep)
6397 */
fb7fd614 6398void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6399{
6400 int i;
6401
6402 /* clear everything except for devices */
6403 memset(link, 0, offsetof(struct ata_link, device[0]));
6404
6405 link->ap = ap;
8989805d 6406 link->pmp = pmp;
4fb37a25
TH
6407 link->active_tag = ATA_TAG_POISON;
6408 link->hw_sata_spd_limit = UINT_MAX;
6409
6410 /* can't use iterator, ap isn't initialized yet */
6411 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6412 struct ata_device *dev = &link->device[i];
6413
6414 dev->link = link;
6415 dev->devno = dev - link->device;
6416 ata_dev_init(dev);
6417 }
6418}
6419
6420/**
6421 * sata_link_init_spd - Initialize link->sata_spd_limit
6422 * @link: Link to configure sata_spd_limit for
6423 *
6424 * Initialize @link->[hw_]sata_spd_limit to the currently
6425 * configured value.
6426 *
6427 * LOCKING:
6428 * Kernel thread context (may sleep).
6429 *
6430 * RETURNS:
6431 * 0 on success, -errno on failure.
6432 */
fb7fd614 6433int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6434{
6435 u32 scontrol, spd;
6436 int rc;
6437
6438 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6439 if (rc)
6440 return rc;
6441
6442 spd = (scontrol >> 4) & 0xf;
6443 if (spd)
6444 link->hw_sata_spd_limit &= (1 << spd) - 1;
6445
6446 link->sata_spd_limit = link->hw_sata_spd_limit;
6447
6448 return 0;
6449}
6450
1da177e4 6451/**
f3187195
TH
6452 * ata_port_alloc - allocate and initialize basic ATA port resources
6453 * @host: ATA host this allocated port belongs to
1da177e4 6454 *
f3187195
TH
6455 * Allocate and initialize basic ATA port resources.
6456 *
6457 * RETURNS:
6458 * Allocate ATA port on success, NULL on failure.
0cba632b 6459 *
1da177e4 6460 * LOCKING:
f3187195 6461 * Inherited from calling layer (may sleep).
1da177e4 6462 */
f3187195 6463struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6464{
f3187195 6465 struct ata_port *ap;
1da177e4 6466
f3187195
TH
6467 DPRINTK("ENTER\n");
6468
6469 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6470 if (!ap)
6471 return NULL;
6472
f4d6d004 6473 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6474 ap->lock = &host->lock;
198e0fed 6475 ap->flags = ATA_FLAG_DISABLED;
f3187195 6476 ap->print_id = -1;
1da177e4 6477 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6478 ap->host = host;
f3187195 6479 ap->dev = host->dev;
1da177e4 6480 ap->last_ctl = 0xFF;
bd5d825c
BP
6481
6482#if defined(ATA_VERBOSE_DEBUG)
6483 /* turn on all debugging levels */
6484 ap->msg_enable = 0x00FF;
6485#elif defined(ATA_DEBUG)
6486 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6487#else
0dd4b21f 6488 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6489#endif
1da177e4 6490
65f27f38
DH
6491 INIT_DELAYED_WORK(&ap->port_task, NULL);
6492 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6493 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6494 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6495 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6496 init_timer_deferrable(&ap->fastdrain_timer);
6497 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6498 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6499
838df628 6500 ap->cbl = ATA_CBL_NONE;
838df628 6501
8989805d 6502 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6503
6504#ifdef ATA_IRQ_TRAP
6505 ap->stats.unhandled_irq = 1;
6506 ap->stats.idle_irq = 1;
6507#endif
1da177e4 6508 return ap;
1da177e4
LT
6509}
6510
f0d36efd
TH
6511static void ata_host_release(struct device *gendev, void *res)
6512{
6513 struct ata_host *host = dev_get_drvdata(gendev);
6514 int i;
6515
6516 for (i = 0; i < host->n_ports; i++) {
6517 struct ata_port *ap = host->ports[i];
6518
ecef7253
TH
6519 if (!ap)
6520 continue;
6521
6522 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
f0d36efd 6523 ap->ops->port_stop(ap);
f0d36efd
TH
6524 }
6525
ecef7253 6526 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
f0d36efd 6527 host->ops->host_stop(host);
1aa56cca 6528
1aa506e4
TH
6529 for (i = 0; i < host->n_ports; i++) {
6530 struct ata_port *ap = host->ports[i];
6531
4911487a
TH
6532 if (!ap)
6533 continue;
6534
6535 if (ap->scsi_host)
1aa506e4
TH
6536 scsi_host_put(ap->scsi_host);
6537
633273a3 6538 kfree(ap->pmp_link);
4911487a 6539 kfree(ap);
1aa506e4
TH
6540 host->ports[i] = NULL;
6541 }
6542
1aa56cca 6543 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6544}
6545
f3187195
TH
6546/**
6547 * ata_host_alloc - allocate and init basic ATA host resources
6548 * @dev: generic device this host is associated with
6549 * @max_ports: maximum number of ATA ports associated with this host
6550 *
6551 * Allocate and initialize basic ATA host resources. LLD calls
6552 * this function to allocate a host, initializes it fully and
6553 * attaches it using ata_host_register().
6554 *
6555 * @max_ports ports are allocated and host->n_ports is
6556 * initialized to @max_ports. The caller is allowed to decrease
6557 * host->n_ports before calling ata_host_register(). The unused
6558 * ports will be automatically freed on registration.
6559 *
6560 * RETURNS:
6561 * Allocate ATA host on success, NULL on failure.
6562 *
6563 * LOCKING:
6564 * Inherited from calling layer (may sleep).
6565 */
6566struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6567{
6568 struct ata_host *host;
6569 size_t sz;
6570 int i;
6571
6572 DPRINTK("ENTER\n");
6573
6574 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6575 return NULL;
6576
6577 /* alloc a container for our list of ATA ports (buses) */
6578 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6579 /* alloc a container for our list of ATA ports (buses) */
6580 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6581 if (!host)
6582 goto err_out;
6583
6584 devres_add(dev, host);
6585 dev_set_drvdata(dev, host);
6586
6587 spin_lock_init(&host->lock);
6588 host->dev = dev;
6589 host->n_ports = max_ports;
6590
6591 /* allocate ports bound to this host */
6592 for (i = 0; i < max_ports; i++) {
6593 struct ata_port *ap;
6594
6595 ap = ata_port_alloc(host);
6596 if (!ap)
6597 goto err_out;
6598
6599 ap->port_no = i;
6600 host->ports[i] = ap;
6601 }
6602
6603 devres_remove_group(dev, NULL);
6604 return host;
6605
6606 err_out:
6607 devres_release_group(dev, NULL);
6608 return NULL;
6609}
6610
f5cda257
TH
6611/**
6612 * ata_host_alloc_pinfo - alloc host and init with port_info array
6613 * @dev: generic device this host is associated with
6614 * @ppi: array of ATA port_info to initialize host with
6615 * @n_ports: number of ATA ports attached to this host
6616 *
6617 * Allocate ATA host and initialize with info from @ppi. If NULL
6618 * terminated, @ppi may contain fewer entries than @n_ports. The
6619 * last entry will be used for the remaining ports.
6620 *
6621 * RETURNS:
6622 * Allocate ATA host on success, NULL on failure.
6623 *
6624 * LOCKING:
6625 * Inherited from calling layer (may sleep).
6626 */
6627struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6628 const struct ata_port_info * const * ppi,
6629 int n_ports)
6630{
6631 const struct ata_port_info *pi;
6632 struct ata_host *host;
6633 int i, j;
6634
6635 host = ata_host_alloc(dev, n_ports);
6636 if (!host)
6637 return NULL;
6638
6639 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6640 struct ata_port *ap = host->ports[i];
6641
6642 if (ppi[j])
6643 pi = ppi[j++];
6644
6645 ap->pio_mask = pi->pio_mask;
6646 ap->mwdma_mask = pi->mwdma_mask;
6647 ap->udma_mask = pi->udma_mask;
6648 ap->flags |= pi->flags;
0c88758b 6649 ap->link.flags |= pi->link_flags;
f5cda257
TH
6650 ap->ops = pi->port_ops;
6651
6652 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6653 host->ops = pi->port_ops;
6654 if (!host->private_data && pi->private_data)
6655 host->private_data = pi->private_data;
6656 }
6657
6658 return host;
6659}
6660
ecef7253
TH
6661/**
6662 * ata_host_start - start and freeze ports of an ATA host
6663 * @host: ATA host to start ports for
6664 *
6665 * Start and then freeze ports of @host. Started status is
6666 * recorded in host->flags, so this function can be called
6667 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6668 * once. If host->ops isn't initialized yet, its set to the
6669 * first non-dummy port ops.
ecef7253
TH
6670 *
6671 * LOCKING:
6672 * Inherited from calling layer (may sleep).
6673 *
6674 * RETURNS:
6675 * 0 if all ports are started successfully, -errno otherwise.
6676 */
6677int ata_host_start(struct ata_host *host)
6678{
6679 int i, rc;
6680
6681 if (host->flags & ATA_HOST_STARTED)
6682 return 0;
6683
6684 for (i = 0; i < host->n_ports; i++) {
6685 struct ata_port *ap = host->ports[i];
6686
f3187195
TH
6687 if (!host->ops && !ata_port_is_dummy(ap))
6688 host->ops = ap->ops;
6689
ecef7253
TH
6690 if (ap->ops->port_start) {
6691 rc = ap->ops->port_start(ap);
6692 if (rc) {
6693 ata_port_printk(ap, KERN_ERR, "failed to "
6694 "start port (errno=%d)\n", rc);
6695 goto err_out;
6696 }
6697 }
6698
6699 ata_eh_freeze_port(ap);
6700 }
6701
6702 host->flags |= ATA_HOST_STARTED;
6703 return 0;
6704
6705 err_out:
6706 while (--i >= 0) {
6707 struct ata_port *ap = host->ports[i];
6708
6709 if (ap->ops->port_stop)
6710 ap->ops->port_stop(ap);
6711 }
6712 return rc;
6713}
6714
b03732f0 6715/**
cca3974e
JG
6716 * ata_sas_host_init - Initialize a host struct
6717 * @host: host to initialize
6718 * @dev: device host is attached to
6719 * @flags: host flags
6720 * @ops: port_ops
b03732f0
BK
6721 *
6722 * LOCKING:
6723 * PCI/etc. bus probe sem.
6724 *
6725 */
f3187195 6726/* KILLME - the only user left is ipr */
cca3974e
JG
6727void ata_host_init(struct ata_host *host, struct device *dev,
6728 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6729{
cca3974e
JG
6730 spin_lock_init(&host->lock);
6731 host->dev = dev;
6732 host->flags = flags;
6733 host->ops = ops;
b03732f0
BK
6734}
6735
f3187195
TH
6736/**
6737 * ata_host_register - register initialized ATA host
6738 * @host: ATA host to register
6739 * @sht: template for SCSI host
6740 *
6741 * Register initialized ATA host. @host is allocated using
6742 * ata_host_alloc() and fully initialized by LLD. This function
6743 * starts ports, registers @host with ATA and SCSI layers and
6744 * probe registered devices.
6745 *
6746 * LOCKING:
6747 * Inherited from calling layer (may sleep).
6748 *
6749 * RETURNS:
6750 * 0 on success, -errno otherwise.
6751 */
6752int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6753{
6754 int i, rc;
6755
6756 /* host must have been started */
6757 if (!(host->flags & ATA_HOST_STARTED)) {
6758 dev_printk(KERN_ERR, host->dev,
6759 "BUG: trying to register unstarted host\n");
6760 WARN_ON(1);
6761 return -EINVAL;
6762 }
6763
6764 /* Blow away unused ports. This happens when LLD can't
6765 * determine the exact number of ports to allocate at
6766 * allocation time.
6767 */
6768 for (i = host->n_ports; host->ports[i]; i++)
6769 kfree(host->ports[i]);
6770
6771 /* give ports names and add SCSI hosts */
6772 for (i = 0; i < host->n_ports; i++)
6773 host->ports[i]->print_id = ata_print_id++;
6774
6775 rc = ata_scsi_add_hosts(host, sht);
6776 if (rc)
6777 return rc;
6778
fafbae87
TH
6779 /* associate with ACPI nodes */
6780 ata_acpi_associate(host);
6781
f3187195
TH
6782 /* set cable, sata_spd_limit and report */
6783 for (i = 0; i < host->n_ports; i++) {
6784 struct ata_port *ap = host->ports[i];
f3187195
TH
6785 unsigned long xfer_mask;
6786
6787 /* set SATA cable type if still unset */
6788 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6789 ap->cbl = ATA_CBL_SATA;
6790
6791 /* init sata_spd_limit to the current value */
4fb37a25 6792 sata_link_init_spd(&ap->link);
f3187195 6793
cbcdd875 6794 /* print per-port info to dmesg */
f3187195
TH
6795 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6796 ap->udma_mask);
6797
abf6e8ed 6798 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
6799 ata_port_printk(ap, KERN_INFO,
6800 "%cATA max %s %s\n",
a16abc0b 6801 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 6802 ata_mode_string(xfer_mask),
cbcdd875 6803 ap->link.eh_info.desc);
abf6e8ed
TH
6804 ata_ehi_clear_desc(&ap->link.eh_info);
6805 } else
f3187195
TH
6806 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6807 }
6808
6809 /* perform each probe synchronously */
6810 DPRINTK("probe begin\n");
6811 for (i = 0; i < host->n_ports; i++) {
6812 struct ata_port *ap = host->ports[i];
6813 int rc;
6814
6815 /* probe */
6816 if (ap->ops->error_handler) {
9af5c9c9 6817 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
6818 unsigned long flags;
6819
6820 ata_port_probe(ap);
6821
6822 /* kick EH for boot probing */
6823 spin_lock_irqsave(ap->lock, flags);
6824
f58229f8
TH
6825 ehi->probe_mask =
6826 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
6827 ehi->action |= ATA_EH_SOFTRESET;
6828 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6829
f4d6d004 6830 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
6831 ap->pflags |= ATA_PFLAG_LOADING;
6832 ata_port_schedule_eh(ap);
6833
6834 spin_unlock_irqrestore(ap->lock, flags);
6835
6836 /* wait for EH to finish */
6837 ata_port_wait_eh(ap);
6838 } else {
6839 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6840 rc = ata_bus_probe(ap);
6841 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6842
6843 if (rc) {
6844 /* FIXME: do something useful here?
6845 * Current libata behavior will
6846 * tear down everything when
6847 * the module is removed
6848 * or the h/w is unplugged.
6849 */
6850 }
6851 }
6852 }
6853
6854 /* probes are done, now scan each port's disk(s) */
6855 DPRINTK("host probe begin\n");
6856 for (i = 0; i < host->n_ports; i++) {
6857 struct ata_port *ap = host->ports[i];
6858
1ae46317 6859 ata_scsi_scan_host(ap, 1);
f3187195
TH
6860 }
6861
6862 return 0;
6863}
6864
f5cda257
TH
6865/**
6866 * ata_host_activate - start host, request IRQ and register it
6867 * @host: target ATA host
6868 * @irq: IRQ to request
6869 * @irq_handler: irq_handler used when requesting IRQ
6870 * @irq_flags: irq_flags used when requesting IRQ
6871 * @sht: scsi_host_template to use when registering the host
6872 *
6873 * After allocating an ATA host and initializing it, most libata
6874 * LLDs perform three steps to activate the host - start host,
6875 * request IRQ and register it. This helper takes necessasry
6876 * arguments and performs the three steps in one go.
6877 *
6878 * LOCKING:
6879 * Inherited from calling layer (may sleep).
6880 *
6881 * RETURNS:
6882 * 0 on success, -errno otherwise.
6883 */
6884int ata_host_activate(struct ata_host *host, int irq,
6885 irq_handler_t irq_handler, unsigned long irq_flags,
6886 struct scsi_host_template *sht)
6887{
cbcdd875 6888 int i, rc;
f5cda257
TH
6889
6890 rc = ata_host_start(host);
6891 if (rc)
6892 return rc;
6893
6894 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6895 dev_driver_string(host->dev), host);
6896 if (rc)
6897 return rc;
6898
cbcdd875
TH
6899 for (i = 0; i < host->n_ports; i++)
6900 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 6901
f5cda257
TH
6902 rc = ata_host_register(host, sht);
6903 /* if failed, just free the IRQ and leave ports alone */
6904 if (rc)
6905 devm_free_irq(host->dev, irq, host);
6906
6907 return rc;
6908}
6909
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* Old-style (non-EH) drivers have no EH machinery to quiesce. */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* make sure no deferred hotplug work is left running */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6964
0529c159
TH
6965/**
6966 * ata_host_detach - Detach all ports of an ATA host
6967 * @host: Host to detach
6968 *
6969 * Detach all ports of @host.
6970 *
6971 * LOCKING:
6972 * Kernel thread context (may sleep).
6973 */
6974void ata_host_detach(struct ata_host *host)
6975{
6976 int i;
6977
6978 for (i = 0; i < host->n_ports; i++)
6979 ata_port_detach(host->ports[i]);
6980}
6981
1da177e4
LT
6982/**
6983 * ata_std_ports - initialize ioaddr with standard port offsets.
6984 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6985 *
6986 * Utility function which initializes data_addr, error_addr,
6987 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6988 * device_addr, status_addr, and command_addr to standard offsets
6989 * relative to cmd_addr.
6990 *
6991 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6992 */
0baab86b 6993
1da177e4
LT
6994void ata_std_ports(struct ata_ioports *ioaddr)
6995{
6996 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6997 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6998 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6999 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7000 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7001 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7002 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7003 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7004 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7005 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7006}
7007
0baab86b 7008
374b1873
JG
7009#ifdef CONFIG_PCI
7010
1da177e4
LT
7011/**
7012 * ata_pci_remove_one - PCI layer callback for device removal
7013 * @pdev: PCI device that was removed
7014 *
b878ca5d
TH
7015 * PCI layer indicates to libata via this hook that hot-unplug or
7016 * module unload event has occurred. Detach all ports. Resource
7017 * release is handled via devres.
1da177e4
LT
7018 *
7019 * LOCKING:
7020 * Inherited from PCI layer (may sleep).
7021 */
f0d36efd 7022void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7023{
2855568b 7024 struct device *dev = &pdev->dev;
cca3974e 7025 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7026
b878ca5d 7027 ata_host_detach(host);
1da177e4
LT
7028}
7029
7030/* move to PCI subsystem */
057ace5e 7031int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7032{
7033 unsigned long tmp = 0;
7034
7035 switch (bits->width) {
7036 case 1: {
7037 u8 tmp8 = 0;
7038 pci_read_config_byte(pdev, bits->reg, &tmp8);
7039 tmp = tmp8;
7040 break;
7041 }
7042 case 2: {
7043 u16 tmp16 = 0;
7044 pci_read_config_word(pdev, bits->reg, &tmp16);
7045 tmp = tmp16;
7046 break;
7047 }
7048 case 4: {
7049 u32 tmp32 = 0;
7050 pci_read_config_dword(pdev, bits->reg, &tmp32);
7051 tmp = tmp32;
7052 break;
7053 }
7054
7055 default:
7056 return -EINVAL;
7057 }
7058
7059 tmp &= bits->mask;
7060
7061 return (tmp == bits->val) ? 1 : 0;
7062}
9b847548 7063
6ffa01d8 7064#ifdef CONFIG_PM
/* Save PCI state and power the device down for suspend.
 * Only a real suspend (PM_EVENT_SUSPEND) drops the device to D3hot;
 * freeze/hibernation-prepare events leave it in D0.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
7073
553c4aa6 7074int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7075{
553c4aa6
TH
7076 int rc;
7077
9b847548
JA
7078 pci_set_power_state(pdev, PCI_D0);
7079 pci_restore_state(pdev);
553c4aa6 7080
b878ca5d 7081 rc = pcim_enable_device(pdev);
553c4aa6
TH
7082 if (rc) {
7083 dev_printk(KERN_ERR, &pdev->dev,
7084 "failed to enable device after resume (%d)\n", rc);
7085 return rc;
7086 }
7087
9b847548 7088 pci_set_master(pdev);
553c4aa6 7089 return 0;
500530f6
TH
7090}
7091
3c5100c1 7092int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7093{
cca3974e 7094 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7095 int rc = 0;
7096
cca3974e 7097 rc = ata_host_suspend(host, mesg);
500530f6
TH
7098 if (rc)
7099 return rc;
7100
3c5100c1 7101 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7102
7103 return 0;
7104}
7105
7106int ata_pci_device_resume(struct pci_dev *pdev)
7107{
cca3974e 7108 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7109 int rc;
500530f6 7110
553c4aa6
TH
7111 rc = ata_pci_device_do_resume(pdev);
7112 if (rc == 0)
7113 ata_host_resume(host);
7114 return rc;
9b847548 7115}
6ffa01d8
TH
7116#endif /* CONFIG_PM */
7117
1da177e4
LT
7118#endif /* CONFIG_PCI */
7119
7120
1da177e4
LT
7121static int __init ata_init(void)
7122{
a8601e5f 7123 ata_probe_timeout *= HZ;
1da177e4
LT
7124 ata_wq = create_workqueue("ata");
7125 if (!ata_wq)
7126 return -ENOMEM;
7127
453b07ac
TH
7128 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7129 if (!ata_aux_wq) {
7130 destroy_workqueue(ata_wq);
7131 return -ENOMEM;
7132 }
7133
1da177e4
LT
7134 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7135 return 0;
7136}
7137
/* Module exit: tear down the workqueues created in ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
7143
a4625085 7144subsys_initcall(ata_init);
1da177e4
LT
7145module_exit(ata_exit);
7146
67846b30 7147static unsigned long ratelimit_time;
34af946a 7148static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7149
7150int ata_ratelimit(void)
7151{
7152 int rc;
7153 unsigned long flags;
7154
7155 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7156
7157 if (time_after(jiffies, ratelimit_time)) {
7158 rc = 1;
7159 ratelimit_time = jiffies + (HZ/5);
7160 } else
7161 rc = 0;
7162
7163 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7164
7165 return rc;
7166}
7167
c22daff4
TH
7168/**
7169 * ata_wait_register - wait until register value changes
7170 * @reg: IO-mapped register
7171 * @mask: Mask to apply to read register value
7172 * @val: Wait condition
7173 * @interval_msec: polling interval in milliseconds
7174 * @timeout_msec: timeout in milliseconds
7175 *
7176 * Waiting for some bits of register to change is a common
7177 * operation for ATA controllers. This function reads 32bit LE
7178 * IO-mapped register @reg and tests for the following condition.
7179 *
7180 * (*@reg & mask) != val
7181 *
7182 * If the condition is met, it returns; otherwise, the process is
7183 * repeated after @interval_msec until timeout.
7184 *
7185 * LOCKING:
7186 * Kernel thread context (may sleep)
7187 *
7188 * RETURNS:
7189 * The final register value.
7190 */
7191u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7192 unsigned long interval_msec,
7193 unsigned long timeout_msec)
7194{
7195 unsigned long timeout;
7196 u32 tmp;
7197
7198 tmp = ioread32(reg);
7199
7200 /* Calculate timeout _after_ the first read to make sure
7201 * preceding writes reach the controller before starting to
7202 * eat away the timeout.
7203 */
7204 timeout = jiffies + (timeout_msec * HZ) / 1000;
7205
7206 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7207 msleep(interval_msec);
7208 tmp = ioread32(reg);
7209 }
7210
7211 return tmp;
7212}
7213
dd5b06c4
TH
/*
 * Dummy port_ops
 */
/* no-op port hook: accepts the port and does nothing */
static void ata_dummy_noret(struct ata_port *ap)	{ }
/* no-op port hook that reports success */
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
/* no-op qc hook: accepts the queued command and does nothing */
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7220
/* Status hook for the dummy port: always reports device-ready (DRDY),
 * never busy or error.
 */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}
7225
/* Issue hook for the dummy port: fail every command with AC_ERR_SYSTEM
 * so nothing is ever actually sent to hardware.
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7230
/* Port operations for a dummy (unused) port: status reads claim the
 * device is ready, command issue always fails, and every other hook is
 * a no-op.  Lets hosts with dead/absent ports share the common code
 * paths without special-casing.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};
7245
21b0ad4f
TH
/* Port info wrapping ata_dummy_port_ops, for drivers that describe
 * ports via ata_port_info tables.
 */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
7249
1da177e4
LT
7250/*
7251 * libata is essentially a library of internal helper functions for
7252 * low-level ATA host controller drivers. As such, the API/ABI is
7253 * likely to change as new drivers are added and updated.
7254 * Do not depend on ABI/API stability.
7255 */
7256
e9c83914
TH
7257EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7258EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7259EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7260EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7261EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7262EXPORT_SYMBOL_GPL(ata_std_bios_param);
7263EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7264EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7265EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7266EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7267EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7268EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7269EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7270EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
7271EXPORT_SYMBOL_GPL(ata_sg_init);
7272EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 7273EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7274EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7275EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7276EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7277EXPORT_SYMBOL_GPL(ata_tf_load);
7278EXPORT_SYMBOL_GPL(ata_tf_read);
7279EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7280EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7281EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
7282EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7283EXPORT_SYMBOL_GPL(ata_tf_from_fis);
7284EXPORT_SYMBOL_GPL(ata_check_status);
7285EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7286EXPORT_SYMBOL_GPL(ata_exec_command);
7287EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7288EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7289EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7290EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7291EXPORT_SYMBOL_GPL(ata_data_xfer);
7292EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7293EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7294EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7295EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7296EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7297EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7298EXPORT_SYMBOL_GPL(ata_bmdma_start);
7299EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7300EXPORT_SYMBOL_GPL(ata_bmdma_status);
7301EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7302EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7303EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7304EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7305EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7306EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7307EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7308EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7309EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7310EXPORT_SYMBOL_GPL(sata_link_debounce);
7311EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4
LT
7312EXPORT_SYMBOL_GPL(sata_phy_reset);
7313EXPORT_SYMBOL_GPL(__sata_phy_reset);
7314EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7315EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7316EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7317EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7318EXPORT_SYMBOL_GPL(sata_std_hardreset);
7319EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7320EXPORT_SYMBOL_GPL(ata_dev_classify);
7321EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7322EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7323EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7324EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7325EXPORT_SYMBOL_GPL(ata_busy_sleep);
d4b2bab4 7326EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 7327EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
7328EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7329EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7330EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7331EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7332EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7333EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7334EXPORT_SYMBOL_GPL(sata_scr_valid);
7335EXPORT_SYMBOL_GPL(sata_scr_read);
7336EXPORT_SYMBOL_GPL(sata_scr_write);
7337EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7338EXPORT_SYMBOL_GPL(ata_link_online);
7339EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7340#ifdef CONFIG_PM
cca3974e
JG
7341EXPORT_SYMBOL_GPL(ata_host_suspend);
7342EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7343#endif /* CONFIG_PM */
6a62a04d
TH
7344EXPORT_SYMBOL_GPL(ata_id_string);
7345EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 7346EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
1da177e4
LT
7347EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7348
1bc4ccff 7349EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
7350EXPORT_SYMBOL_GPL(ata_timing_compute);
7351EXPORT_SYMBOL_GPL(ata_timing_merge);
7352
1da177e4
LT
7353#ifdef CONFIG_PCI
7354EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7355EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7356EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7357EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
1da177e4
LT
7358EXPORT_SYMBOL_GPL(ata_pci_init_one);
7359EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7360#ifdef CONFIG_PM
500530f6
TH
7361EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7362EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7363EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7364EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7365#endif /* CONFIG_PM */
67951ade
AC
7366EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7367EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7368#endif /* CONFIG_PCI */
9b847548 7369
31f88384 7370EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
3af9a77a
TH
7371EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7372EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7373EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7374EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7375
b64bbc39
TH
7376EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7377EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7378EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7379EXPORT_SYMBOL_GPL(ata_port_desc);
7380#ifdef CONFIG_PCI
7381EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7382#endif /* CONFIG_PCI */
ece1d636 7383EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03 7384EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7385EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7386EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7387EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7388EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7389EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7390EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7391EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7392EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7393EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7394EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7395EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7396
7397EXPORT_SYMBOL_GPL(ata_cable_40wire);
7398EXPORT_SYMBOL_GPL(ata_cable_80wire);
7399EXPORT_SYMBOL_GPL(ata_cable_unknown);
7400EXPORT_SYMBOL_GPL(ata_cable_sata);