/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION	"2.21"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

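/*
 * Worked example (illustrative, not part of the original source): for an
 * IDENTIFY DEVICE taskfile -- tf->command == ATA_CMD_ID_ATA (0xec), every
 * other field zero -- directed at PMP port 0, ata_tf_to_fis() yields
 * fis[0] = 0x27 (H2D Register FIS type), fis[1] = 0x80 (bit 7 set marks a
 * Command FIS, PMP port 0), fis[2] = 0xec, and fis[3]..fis[19] = 0.
 */
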
/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

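/*
 * Worked example (illustrative, not part of the original source): a DMA
 * write with FUA and LBA48 set indexes ata_rw_cmds[] at
 * 16 (dma) + 4 (fua) + 2 (lba48) + 1 (write) = 23, i.e.
 * ATA_CMD_WRITE_FUA_EXT.  The zero entries in the table make invalid
 * combinations -- e.g. FUA without LBA48 (index 21) -- fail with -1.
 */
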
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

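/*
 * Worked example (illustrative, not part of the original source): for a
 * CHS taskfile with cyl = 2, head = 3, sect = 4 on a geometry of 16 heads
 * and 63 sectors per track, the formula above gives
 * (2 * 16 + 3) * 63 + 4 = 2209.
 */
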
/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

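/*
 * Worked example (illustrative, not part of the original source): the CHS
 * branch above maps block = 100 on a 16-head, 63-sector geometry to
 * track = 100 / 63 = 1, cyl = 1 / 16 = 0, head = 1 % 16 = 1 and
 * sect = 100 % 63 + 1 = 38 (CHS sector numbering starts at 1).
 */
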
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

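/*
 * Illustrative round trip (not part of the original source): because
 * ata_xfer_tbl[] maps each contiguous bit range of the mask onto a
 * contiguous XFER_* range, ata_xfer_mask2mode(ata_xfer_mode2mask(XFER_UDMA_5))
 * returns XFER_UDMA_5 again, and a mask with several bits set decodes to
 * the fastest mode it contains.
 */
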
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

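/*
 * Worked example (illustrative, not part of the original source): an
 * IDENTIFY word of 0x4142 holds 'A' in its high byte and 'B' in its low
 * byte, so the loop above emits "AB" -- undoing the high-byte-first
 * packing the spec uses for model and firmware strings.
 */
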
/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

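/*
 * Usage sketch (illustrative, not part of the original source): extract
 * the product string the way ata_dev_configure() below does, with a
 * buffer one byte larger than the on-page field for the trailing NUL:
 *
 *	char modelbuf[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD, sizeof(modelbuf));
 */
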
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

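/*
 * Note (added for clarity, not part of the original source): both helpers
 * return the taskfile's highest addressable LBA plus one, i.e. they turn
 * the zero-based max address reported by READ NATIVE MAX ADDRESS (EXT)
 * into a sector count.  A taskfile addressing LBA 0xffff, for example,
 * yields 0x10000 sectors.
 */
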
/**
 * ata_read_native_max_address_ext - LBA48 native max query
 * @dev: Device to query
 *
 * Perform an LBA48 size query upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */

static u64 ata_read_native_max_address_ext(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}

/**
 * ata_read_native_max_address - LBA28 native max query
 * @dev: Device to query
 *
 * Perform an LBA28 size query upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */

static u64 ata_read_native_max_address(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}

/**
 * ata_set_native_max_address_ext - LBA48 native max set
 * @dev: Device to query
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA48 size set max upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */

static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	tf.hob_lbal = (new_sectors >> 24) & 0xff;
	tf.hob_lbam = (new_sectors >> 32) & 0xff;
	tf.hob_lbah = (new_sectors >> 40) & 0xff;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}

/**
 * ata_set_native_max_address - LBA28 native max set
 * @dev: Device to query
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA28 size set max upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */

static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;
	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 */

static u64 ata_hpa_resize(struct ata_device *dev)
{
	u64 sectors = dev->n_sectors;
	u64 hpa_sectors;

	if (ata_id_has_lba48(dev->id))
		hpa_sectors = ata_read_native_max_address_ext(dev);
	else
		hpa_sectors = ata_read_native_max_address(dev);

	/* if no hpa, both should be equal */
	ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, "
		       "hpa_sectors = %lld\n",
		       __FUNCTION__, (long long)sectors, (long long)hpa_sectors);

	if (hpa_sectors > sectors) {
		ata_dev_printk(dev, KERN_INFO,
			"Host Protected Area detected:\n"
			"\tcurrent size: %lld sectors\n"
			"\tnative size: %lld sectors\n",
			(long long)sectors, (long long)hpa_sectors);

		if (ata_ignore_hpa) {
			if (ata_id_has_lba48(dev->id))
				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
			else
				hpa_sectors = ata_set_native_max_address(dev,
								hpa_sectors);

			if (hpa_sectors) {
				ata_dev_printk(dev, KERN_INFO, "native size "
					"increased to %lld sectors\n",
					(long long)hpa_sectors);
				return hpa_sectors;
			}
		}
	}
	return sectors;
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode. This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

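/*
 * Worked example (illustrative, not part of the original source): a drive
 * reporting PIO3 and PIO4 support in word 64 (low bits 0x03) gets
 * pio_mask = (0x03 << 3) | 0x7 = 0x1f -- modes 0-2 are assumed
 * unconditionally, since they predate the word-64 report.
 */
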
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user(low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		cancel_work_sync(&ap->port_task.work);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

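/*
 * Usage sketch (illustrative, not part of the original source): a cache
 * flush needs nothing beyond the opcode, so it can be issued this way:
 *
 *	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);
 *
 *	if (err_mask)
 *		ata_dev_printk(dev, KERN_ERR, "FLUSH CACHE failed\n");
 */
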
/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

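/*
 * Worked example (illustrative, not part of the original source): the
 * EIDE PIO word holds a minimum cycle time in nanoseconds, so a drive
 * reporting 383 ns (slower than PIO2's 240 ns) is capped at
 * 3 << ATA_SHIFT_PIO (modes 0-1 only), while one reporting 180 ns gets
 * 7 << ATA_SHIFT_PIO (modes 0-2) when IORDY is off.
 */
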
/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

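/*
 * Illustrative output (not part of the original source): a drive with a
 * 32-deep queue behind a host that can queue 31 commands produces
 * "NCQ (depth 31/32)", while a blacklisted drive is reported as
 * "NCQ (not used)" -- the string later appears in the dmesg line printed
 * by ata_dev_configure().
 */
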
49016aca 1832/**
ffeae418 1833 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1834 * @dev: Target device to configure
1835 *
1836 * Configure @dev according to @dev->id. Generic and low-level
1837 * driver specific fixups are also applied.
49016aca
TH
1838 *
1839 * LOCKING:
ffeae418
TH
1840 * Kernel thread context (may sleep)
1841 *
1842 * RETURNS:
1843 * 0 on success, -errno otherwise
49016aca 1844 */
efdaedc4 1845int ata_dev_configure(struct ata_device *dev)
49016aca 1846{
3373efd8 1847 struct ata_port *ap = dev->ap;
efdaedc4 1848 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1849 const u16 *id = dev->id;
ff8854b2 1850 unsigned int xfer_mask;
b352e57d 1851 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1852 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1853 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1854 int rc;
49016aca 1855
0dd4b21f 1856 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1857 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1858 __FUNCTION__);
ffeae418 1859 return 0;
49016aca
TH
1860 }
1861
0dd4b21f 1862 if (ata_msg_probe(ap))
44877b4e 1863 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1864
08573a86 1865 /* set _SDD */
3a32a8e9 1866 rc = ata_acpi_push_id(dev);
08573a86
KCA
1867 if (rc) {
1868 ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
1869 rc);
1870 }
1871
1872 /* retrieve and execute the ATA task file of _GTF */
1873 ata_acpi_exec_tfs(ap);
1874
c39f5ebe 1875 /* print device capabilities */
0dd4b21f 1876 if (ata_msg_probe(ap))
88574551
TH
1877 ata_dev_printk(dev, KERN_DEBUG,
1878 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1879 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1880 __FUNCTION__,
f15a1daf
TH
1881 id[49], id[82], id[83], id[84],
1882 id[85], id[86], id[87], id[88]);
c39f5ebe 1883
208a9933 1884 /* initialize to-be-configured parameters */
ea1dd4e1 1885 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1886 dev->max_sectors = 0;
1887 dev->cdb_len = 0;
1888 dev->n_sectors = 0;
1889 dev->cylinders = 0;
1890 dev->heads = 0;
1891 dev->sectors = 0;
1892
1da177e4
LT
1893 /*
1894 * common ATA, ATAPI feature tests
1895 */
1896
ff8854b2 1897 /* find max transfer mode; for printk only */
1148c3a7 1898 xfer_mask = ata_id_xfermask(id);
1da177e4 1899
0dd4b21f
BP
1900 if (ata_msg_probe(ap))
1901 ata_dump_id(id);
1da177e4
LT
1902
1903 /* ATA-specific feature tests */
1904 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1905 if (ata_id_is_cfa(id)) {
1906 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1907 ata_dev_printk(dev, KERN_WARNING,
1908 "supports DRM functions and may "
1909 "not be fully accessable.\n");
b352e57d
AC
1910 snprintf(revbuf, 7, "CFA");
1911 }
1912 else
1913 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1914
1148c3a7 1915 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1916
3f64f565 1917 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
591a6e8e 1918 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
3f64f565
EM
1919 sizeof(fwrevbuf));
1920
591a6e8e 1921 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
3f64f565
EM
1922 sizeof(modelbuf));
1923
1924 if (dev->id[59] & 0x100)
1925 dev->multi_count = dev->id[59] & 0xff;
1926
1148c3a7 1927 if (ata_id_has_lba(id)) {
4c2d721a 1928 const char *lba_desc;
a6e6ce8e 1929 char ncq_desc[20];
8bf62ece 1930
4c2d721a
TH
1931 lba_desc = "LBA";
1932 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1933 if (ata_id_has_lba48(id)) {
8bf62ece 1934 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1935 lba_desc = "LBA48";
6fc49adb
TH
1936
1937 if (dev->n_sectors >= (1UL << 28) &&
1938 ata_id_has_flush_ext(id))
1939 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1940 }
8bf62ece 1941
1e999736
AC
1942 if (ata_id_hpa_enabled(dev->id))
1943 dev->n_sectors = ata_hpa_resize(dev);
1944
a6e6ce8e
TH
1945 /* config NCQ */
1946 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1947
8bf62ece 1948 /* print device info to dmesg */
3f64f565
EM
1949 if (ata_msg_drv(ap) && print_info) {
1950 ata_dev_printk(dev, KERN_INFO,
1951 "%s: %s, %s, max %s\n",
1952 revbuf, modelbuf, fwrevbuf,
1953 ata_mode_string(xfer_mask));
1954 ata_dev_printk(dev, KERN_INFO,
1955 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1956 (unsigned long long)dev->n_sectors,
3f64f565
EM
1957 dev->multi_count, lba_desc, ncq_desc);
1958 }
ffeae418 1959 } else {
8bf62ece
AL
1960 /* CHS */
1961
1962 /* Default translation */
1148c3a7
TH
1963 dev->cylinders = id[1];
1964 dev->heads = id[3];
1965 dev->sectors = id[6];
8bf62ece 1966
1148c3a7 1967 if (ata_id_current_chs_valid(id)) {
8bf62ece 1968 /* Current CHS translation is valid. */
1148c3a7
TH
1969 dev->cylinders = id[54];
1970 dev->heads = id[55];
1971 dev->sectors = id[56];
8bf62ece
AL
1972 }
1973
1974 /* print device info to dmesg */
3f64f565 1975 if (ata_msg_drv(ap) && print_info) {
88574551 1976 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1977 "%s: %s, %s, max %s\n",
1978 revbuf, modelbuf, fwrevbuf,
1979 ata_mode_string(xfer_mask));
a84471fe 1980 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1981 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1982 (unsigned long long)dev->n_sectors,
1983 dev->multi_count, dev->cylinders,
1984 dev->heads, dev->sectors);
1985 }
07f6f7d0
AL
1986 }
1987
6e7846e9 1988 dev->cdb_len = 16;
1da177e4
LT
1989 }
1990
1991 /* ATAPI-specific feature tests */
2c13b7ce 1992 else if (dev->class == ATA_DEV_ATAPI) {
08a556db
AL
1993 char *cdb_intr_string = "";
1994
1148c3a7 1995 rc = atapi_cdb_len(id);
1da177e4 1996 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 1997 if (ata_msg_warn(ap))
88574551
TH
1998 ata_dev_printk(dev, KERN_WARNING,
1999 "unsupported CDB len\n");
ffeae418 2000 rc = -EINVAL;
1da177e4
LT
2001 goto err_out_nosup;
2002 }
6e7846e9 2003 dev->cdb_len = (unsigned int) rc;
1da177e4 2004
08a556db 2005 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2006 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2007 cdb_intr_string = ", CDB intr";
2008 }
312f7da2 2009
1da177e4 2010 /* print device info to dmesg */
5afc8142 2011 if (ata_msg_drv(ap) && print_info)
12436c30
TH
2012 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
2013 ata_mode_string(xfer_mask),
2014 cdb_intr_string);
1da177e4
LT
2015 }
2016
914ed354
TH
2017 /* determine max_sectors */
2018 dev->max_sectors = ATA_MAX_SECTORS;
2019 if (dev->flags & ATA_DFLAG_LBA48)
2020 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2021
93590859
AC
2022 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2023 /* Let the user know. We don't want to disallow opens for
2024 rescue purposes, or in case the vendor is just a blithering
2025 idiot */
2026 if (print_info) {
2027 ata_dev_printk(dev, KERN_WARNING,
2028"Drive reports diagnostics failure. This may indicate a drive\n");
2029 ata_dev_printk(dev, KERN_WARNING,
2030"fault or invalid emulation. Contact drive vendor for information.\n");
2031 }
2032 }
2033
4b2f3ede 2034 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2035 if (ata_dev_knobble(dev)) {
5afc8142 2036 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2037 ata_dev_printk(dev, KERN_INFO,
2038 "applying bridge limits\n");
5a529139 2039 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2040 dev->max_sectors = ATA_MAX_SECTORS;
2041 }
2042
18d6e9d5 2043 if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2044 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2045 dev->max_sectors);
18d6e9d5 2046
6f23a31d
AL
2047 /* limit ATAPI DMA to R/W commands only */
2048 if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
2049 dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
2050
4b2f3ede 2051 if (ap->ops->dev_config)
cd0d3bbc 2052 ap->ops->dev_config(dev);
4b2f3ede 2053
0dd4b21f
BP
2054 if (ata_msg_probe(ap))
2055 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2056 __FUNCTION__, ata_chk_status(ap));
ffeae418 2057 return 0;
1da177e4
LT
2058
2059err_out_nosup:
0dd4b21f 2060 if (ata_msg_probe(ap))
88574551
TH
2061 ata_dev_printk(dev, KERN_DEBUG,
2062 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2063 return rc;
1da177e4
LT
2064}
2065
be0d18df 2066/**
2e41e8e6 2067 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2068 * @ap: port
2069 *
2e41e8e6 2070 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2071 * detection.
2072 */
2073
2074int ata_cable_40wire(struct ata_port *ap)
2075{
2076 return ATA_CBL_PATA40;
2077}
2078
2079/**
2e41e8e6 2080 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2081 * @ap: port
2082 *
2e41e8e6 2083 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2084 * detection.
2085 */
2086
2087int ata_cable_80wire(struct ata_port *ap)
2088{
2089 return ATA_CBL_PATA80;
2090}
2091
2092/**
2093 * ata_cable_unknown - return unknown PATA cable.
2094 * @ap: port
2095 *
2096 * Helper method for drivers which have no PATA cable detection.
2097 */
2098
2099int ata_cable_unknown(struct ata_port *ap)
2100{
2101 return ATA_CBL_PATA_UNK;
2102}
2103
2104/**
2105 * ata_cable_sata - return SATA cable type
2106 * @ap: port
2107 *
2108 * Helper method for drivers which have SATA cables.
2109 */
2110
2111int ata_cable_sata(struct ata_port *ap)
2112{
2113 return ATA_CBL_SATA;
2114}
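/* Hedged sketch (hypothetical driver code): a PATA driver that knows
 * its cable wiring points .cable_detect at one of the helpers above;
 * ata_bus_probe() then calls it through ap->ops->cable_detect.
 */
static struct ata_port_operations example_pata_ops = {
	.cable_detect	= ata_cable_40wire,	/* hardwired 40-wire */
	/* other mandatory callbacks elided */
};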
2115
1da177e4
LT
2116/**
2117 * ata_bus_probe - Reset and probe ATA bus
2118 * @ap: Bus to probe
2119 *
0cba632b
JG
2120 * Master ATA bus probing function. Initiates a hardware-dependent
2121 * bus reset, then attempts to identify any devices found on
2122 * the bus.
2123 *
1da177e4 2124 * LOCKING:
0cba632b 2125 * PCI/etc. bus probe sem.
1da177e4
LT
2126 *
2127 * RETURNS:
96072e69 2128 * Zero on success, negative errno otherwise.
1da177e4
LT
2129 */
2130
80289167 2131int ata_bus_probe(struct ata_port *ap)
1da177e4 2132{
28ca5c57 2133 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2134 int tries[ATA_MAX_DEVICES];
4ae72a1e 2135 int i, rc;
e82cbdb9 2136 struct ata_device *dev;
1da177e4 2137
28ca5c57 2138 ata_port_probe(ap);
c19ba8af 2139
14d2bac1
TH
2140 for (i = 0; i < ATA_MAX_DEVICES; i++)
2141 tries[i] = ATA_PROBE_MAX_TRIES;
2142
2143 retry:
2044470c 2144 /* reset and determine device classes */
52783c5d 2145 ap->ops->phy_reset(ap);
2061a47a 2146
52783c5d
TH
2147 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2148 dev = &ap->device[i];
c19ba8af 2149
52783c5d
TH
2150 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2151 dev->class != ATA_DEV_UNKNOWN)
2152 classes[dev->devno] = dev->class;
2153 else
2154 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2155
52783c5d 2156 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2157 }
1da177e4 2158
52783c5d 2159 ata_port_probe(ap);
2044470c 2160
b6079ca4
AC
2161 /* after the reset the device state is PIO 0 and the controller
2162 state is undefined. Record the mode */
2163
2164 for (i = 0; i < ATA_MAX_DEVICES; i++)
2165 ap->device[i].pio_mode = XFER_PIO_0;
2166
f31f0cc2
JG
2167 /* read IDENTIFY page and configure devices. We have to do the identify
2168 specific sequence bass-ackwards so that PDIAG- is released by
2169 the slave device */
2170
2171 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
e82cbdb9 2172 dev = &ap->device[i];
28ca5c57 2173
ec573755
TH
2174 if (tries[i])
2175 dev->class = classes[i];
ffeae418 2176
14d2bac1 2177 if (!ata_dev_enabled(dev))
ffeae418 2178 continue;
ffeae418 2179
bff04647
TH
2180 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2181 dev->id);
14d2bac1
TH
2182 if (rc)
2183 goto fail;
f31f0cc2
JG
2184 }
2185
be0d18df
AC
2186 /* Now ask for the cable type as PDIAG- should have been released */
2187 if (ap->ops->cable_detect)
2188 ap->cbl = ap->ops->cable_detect(ap);
2189
f31f0cc2
JG
2190 /* After the identify sequence we can now set up the devices. We do
2191 this in the normal order so that the user doesn't get confused */
2192
2193 for(i = 0; i < ATA_MAX_DEVICES; i++) {
2194 dev = &ap->device[i];
2195 if (!ata_dev_enabled(dev))
2196 continue;
14d2bac1 2197
efdaedc4
TH
2198 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2199 rc = ata_dev_configure(dev);
2200 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2201 if (rc)
2202 goto fail;
1da177e4
LT
2203 }
2204
e82cbdb9 2205 /* configure transfer mode */
3adcebb2 2206 rc = ata_set_mode(ap, &dev);
4ae72a1e 2207 if (rc)
51713d35 2208 goto fail;
1da177e4 2209
e82cbdb9
TH
2210 for (i = 0; i < ATA_MAX_DEVICES; i++)
2211 if (ata_dev_enabled(&ap->device[i]))
2212 return 0;
1da177e4 2213
e82cbdb9
TH
2214 /* no device present, disable port */
2215 ata_port_disable(ap);
1da177e4 2216 ap->ops->port_disable(ap);
96072e69 2217 return -ENODEV;
14d2bac1
TH
2218
2219 fail:
4ae72a1e
TH
2220 tries[dev->devno]--;
2221
14d2bac1
TH
2222 switch (rc) {
2223 case -EINVAL:
4ae72a1e 2224 /* eeek, something went very wrong, give up */
14d2bac1
TH
2225 tries[dev->devno] = 0;
2226 break;
4ae72a1e
TH
2227
2228 case -ENODEV:
2229 /* give it just one more chance */
2230 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2231 case -EIO:
4ae72a1e
TH
2232 if (tries[dev->devno] == 1) {
2233 /* This is the last chance, better to slow
2234 * down than lose it.
2235 */
2236 sata_down_spd_limit(ap);
2237 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2238 }
14d2bac1
TH
2239 }
2240
4ae72a1e 2241 if (!tries[dev->devno])
3373efd8 2242 ata_dev_disable(dev);
ec573755 2243
14d2bac1 2244 goto retry;
1da177e4
LT
2245}
2246
2247/**
0cba632b
JG
2248 * ata_port_probe - Mark port as enabled
2249 * @ap: Port for which we indicate enablement
1da177e4 2250 *
0cba632b
JG
2251 * Modify @ap data structure such that the system
2252 * thinks that the entire port is enabled.
2253 *
cca3974e 2254 * LOCKING: host lock, or some other form of
0cba632b 2255 * serialization.
1da177e4
LT
2256 */
2257
2258void ata_port_probe(struct ata_port *ap)
2259{
198e0fed 2260 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2261}
2262
3be680b7
TH
2263/**
2264 * sata_print_link_status - Print SATA link status
2265 * @ap: SATA port to printk link status about
2266 *
2267 * This function prints link speed and status of a SATA link.
2268 *
2269 * LOCKING:
2270 * None.
2271 */
43727fbc 2272void sata_print_link_status(struct ata_port *ap)
3be680b7 2273{
6d5f9732 2274 u32 sstatus, scontrol, tmp;
3be680b7 2275
81952c54 2276 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 2277 return;
81952c54 2278 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 2279
81952c54 2280 if (ata_port_online(ap)) {
3be680b7 2281 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
2282 ata_port_printk(ap, KERN_INFO,
2283 "SATA link up %s (SStatus %X SControl %X)\n",
2284 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2285 } else {
f15a1daf
TH
2286 ata_port_printk(ap, KERN_INFO,
2287 "SATA link down (SStatus %X SControl %X)\n",
2288 sstatus, scontrol);
3be680b7
TH
2289 }
2290}
2291
1da177e4 2292/**
780a87f7
JG
2293 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2294 * @ap: SATA port associated with target SATA PHY.
1da177e4 2295 *
780a87f7
JG
2296 * This function issues commands to standard SATA Sxxx
2297 * PHY registers, to wake up the phy (and device), and
2298 * clear any reset condition.
1da177e4
LT
2299 *
2300 * LOCKING:
0cba632b 2301 * PCI/etc. bus probe sem.
1da177e4
LT
2302 *
2303 */
2304void __sata_phy_reset(struct ata_port *ap)
2305{
2306 u32 sstatus;
2307 unsigned long timeout = jiffies + (HZ * 5);
2308
2309 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2310 /* issue phy wake/reset */
81952c54 2311 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
2312 /* Couldn't find anything in SATA I/II specs, but
2313 * AHCI-1.1 10.4.2 says at least 1 ms. */
2314 mdelay(1);
1da177e4 2315 }
81952c54
TH
2316 /* phy wake/clear reset */
2317 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
2318
2319 /* wait for phy to become ready, if necessary */
2320 do {
2321 msleep(200);
81952c54 2322 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
2323 if ((sstatus & 0xf) != 1)
2324 break;
2325 } while (time_before(jiffies, timeout));
2326
3be680b7
TH
2327 /* print link status */
2328 sata_print_link_status(ap);
656563e3 2329
3be680b7 2330 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 2331 if (!ata_port_offline(ap))
1da177e4 2332 ata_port_probe(ap);
3be680b7 2333 else
1da177e4 2334 ata_port_disable(ap);
1da177e4 2335
198e0fed 2336 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2337 return;
2338
2339 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2340 ata_port_disable(ap);
2341 return;
2342 }
2343
2344 ap->cbl = ATA_CBL_SATA;
2345}
2346
2347/**
780a87f7
JG
2348 * sata_phy_reset - Reset SATA bus.
2349 * @ap: SATA port associated with target SATA PHY.
1da177e4 2350 *
780a87f7
JG
2351 * This function resets the SATA bus, and then probes
2352 * the bus for devices.
1da177e4
LT
2353 *
2354 * LOCKING:
0cba632b 2355 * PCI/etc. bus probe sem.
1da177e4
LT
2356 *
2357 */
2358void sata_phy_reset(struct ata_port *ap)
2359{
2360 __sata_phy_reset(ap);
198e0fed 2361 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2362 return;
2363 ata_bus_reset(ap);
2364}
2365
ebdfca6e
AC
2366/**
2367 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2368 * @adev: device
2369 *
2370 * Obtain the other device on the same cable; if none is
2371 * present, NULL is returned.
2372 */
2e9edbf8 2373
3373efd8 2374struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2375{
3373efd8 2376 struct ata_port *ap = adev->ap;
ebdfca6e 2377 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2378 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2379 return NULL;
2380 return pair;
2381}
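/* Hypothetical usage sketch: controllers whose two devices share
 * timing registers often consult the companion device before
 * programming the channel. example_pair_is_atapi() is made up.
 */
static int example_pair_is_atapi(struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);

	return pair && pair->class == ATA_DEV_ATAPI;
}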
2382
1da177e4 2383/**
780a87f7
JG
2384 * ata_port_disable - Disable port.
2385 * @ap: Port to be disabled.
1da177e4 2386 *
780a87f7
JG
2387 * Modify @ap data structure such that the system
2388 * thinks that the entire port is disabled, and should
2389 * never attempt to probe or communicate with devices
2390 * on this port.
2391 *
cca3974e 2392 * LOCKING: host lock, or some other form of
780a87f7 2393 * serialization.
1da177e4
LT
2394 */
2395
2396void ata_port_disable(struct ata_port *ap)
2397{
2398 ap->device[0].class = ATA_DEV_NONE;
2399 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2400 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2401}
2402
1c3fae4d 2403/**
3c567b7d 2404 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2405 * @ap: Port to adjust SATA spd limit for
2406 *
2407 * Adjust SATA spd limit of @ap downward. Note that this
2408 * function only adjusts the limit. The change must be applied
3c567b7d 2409 * using sata_set_spd().
1c3fae4d
TH
2410 *
2411 * LOCKING:
2412 * Inherited from caller.
2413 *
2414 * RETURNS:
2415 * 0 on success, negative errno on failure
2416 */
3c567b7d 2417int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2418{
81952c54
TH
2419 u32 sstatus, spd, mask;
2420 int rc, highbit;
1c3fae4d 2421
81952c54
TH
2422 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2423 if (rc)
2424 return rc;
1c3fae4d
TH
2425
2426 mask = ap->sata_spd_limit;
2427 if (mask <= 1)
2428 return -EINVAL;
2429 highbit = fls(mask) - 1;
2430 mask &= ~(1 << highbit);
2431
81952c54 2432 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2433 if (spd <= 1)
2434 return -EINVAL;
2435 spd--;
2436 mask &= (1 << spd) - 1;
2437 if (!mask)
2438 return -EINVAL;
2439
2440 ap->sata_spd_limit = mask;
2441
f15a1daf
TH
2442 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2443 sata_spd_string(fls(mask)));
1c3fae4d
TH
2444
2445 return 0;
2446}
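/* Standalone user-space sketch of the mask arithmetic above (fls()
 * emulated with a GCC builtin; the values are assumptions for the
 * demo): with Gen1|Gen2 allowed (mask 0x3) and the link currently at
 * Gen2 (spd == 2), the limit collapses to Gen1 only.
 */
#include <stdio.h>

int main(void)
{
	unsigned int mask = 0x3;		/* Gen1 | Gen2 allowed */
	unsigned int spd = 2;			/* link negotiated Gen2 */
	int highbit = 31 - __builtin_clz(mask);	/* fls(mask) - 1 */

	mask &= ~(1u << highbit);		/* drop fastest: 0x1 */
	spd--;
	mask &= (1u << spd) - 1;		/* cap below current speed */
	printf("new limit mask: %#x\n", mask);	/* prints 0x1 */
	return 0;
}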
2447
3c567b7d 2448static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2449{
2450 u32 spd, limit;
2451
2452 if (ap->sata_spd_limit == UINT_MAX)
2453 limit = 0;
2454 else
2455 limit = fls(ap->sata_spd_limit);
2456
2457 spd = (*scontrol >> 4) & 0xf;
2458 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2459
2460 return spd != limit;
2461}
2462
2463/**
3c567b7d 2464 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2465 * @ap: Port in question
2466 *
2467 * Test whether the spd limit in SControl matches
2468 * @ap->sata_spd_limit. This function is used to determine
2469 * whether hardreset is necessary to apply SATA spd
2470 * configuration.
2471 *
2472 * LOCKING:
2473 * Inherited from caller.
2474 *
2475 * RETURNS:
2476 * 1 if SATA spd configuration is needed, 0 otherwise.
2477 */
3c567b7d 2478int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2479{
2480 u32 scontrol;
2481
81952c54 2482 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2483 return 0;
2484
3c567b7d 2485 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2486}
2487
2488/**
3c567b7d 2489 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2490 * @ap: Port to set SATA spd for
2491 *
2492 * Set SATA spd of @ap according to sata_spd_limit.
2493 *
2494 * LOCKING:
2495 * Inherited from caller.
2496 *
2497 * RETURNS:
2498 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2499 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2500 */
3c567b7d 2501int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2502{
2503 u32 scontrol;
81952c54 2504 int rc;
1c3fae4d 2505
81952c54
TH
2506 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2507 return rc;
1c3fae4d 2508
3c567b7d 2509 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2510 return 0;
2511
81952c54
TH
2512 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2513 return rc;
2514
1c3fae4d
TH
2515 return 1;
2516}
2517
452503f9
AC
2518/*
2519 * This mode timing computation functionality is ported over from
2520 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2521 */
2522/*
b352e57d 2523 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2524 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2525 * for UDMA6, which is currently supported only by Maxtor drives.
2526 *
2527 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2528 */
2529
2530static const struct ata_timing ata_timing[] = {
2531
2532 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2533 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2534 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2535 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2536
b352e57d
AC
2537 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2538 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2539 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2540 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2541 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2542
2543/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2544
452503f9
AC
2545 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2546 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2547 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2548
452503f9
AC
2549 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2550 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2551 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2552
b352e57d
AC
2553 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2554 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2555 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2556 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2557
2558 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2559 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2560 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2561
2562/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2563
2564 { 0xFF }
2565};
2566
2567#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2568#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2569
2570static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2571{
2572 q->setup = EZ(t->setup * 1000, T);
2573 q->act8b = EZ(t->act8b * 1000, T);
2574 q->rec8b = EZ(t->rec8b * 1000, T);
2575 q->cyc8b = EZ(t->cyc8b * 1000, T);
2576 q->active = EZ(t->active * 1000, T);
2577 q->recover = EZ(t->recover * 1000, T);
2578 q->cycle = EZ(t->cycle * 1000, T);
2579 q->udma = EZ(t->udma * 1000, UT);
2580}
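/* ENOUGH() is a ceiling division, so quantization always rounds a
 * nanosecond value up to a whole number of bus clocks. Standalone
 * sketch, assuming a ~33 MHz bus clock (period taken as 30303 ps):
 */
#include <stdio.h>

#define ENOUGH(v, unit)	(((v) - 1) / (unit) + 1)
#define EZ(v, unit)	((v) ? ENOUGH(v, unit) : 0)

int main(void)
{
	int active_ns = 70;	/* XFER_PIO_4 active time from the table */
	int T = 30303;		/* assumed clock period in picoseconds */

	/* ceil(70000 / 30303) == 3 bus clocks */
	printf("clocks = %d\n", EZ(active_ns * 1000, T));
	return 0;
}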
2581
2582void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2583 struct ata_timing *m, unsigned int what)
2584{
2585 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2586 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2587 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2588 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2589 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2590 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2591 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2592 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2593}
2594
2595static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2596{
2597 const struct ata_timing *t;
2598
2599 for (t = ata_timing; t->mode != speed; t++)
91190758 2600 if (t->mode == 0xFF)
452503f9 2601 return NULL;
2e9edbf8 2602 return t;
452503f9
AC
2603}
2604
2605int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2606 struct ata_timing *t, int T, int UT)
2607{
2608 const struct ata_timing *s;
2609 struct ata_timing p;
2610
2611 /*
2e9edbf8 2612 * Find the mode.
75b1f2f8 2613 */
452503f9
AC
2614
2615 if (!(s = ata_timing_find_mode(speed)))
2616 return -EINVAL;
2617
75b1f2f8
AL
2618 memcpy(t, s, sizeof(*s));
2619
452503f9
AC
2620 /*
2621 * If the drive is an EIDE drive, it can tell us it needs extended
2622 * PIO/MW_DMA cycle timing.
2623 */
2624
2625 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2626 memset(&p, 0, sizeof(p));
2627 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2628 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2629 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2630 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2631 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2632 }
2633 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2634 }
2635
2636 /*
2637 * Convert the timing to bus clock counts.
2638 */
2639
75b1f2f8 2640 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2641
2642 /*
c893a3ae
RD
2643 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2644 * S.M.A.R.T. and some other commands. We have to ensure that the
2645 * DMA cycle timing is slower than or equal to the fastest PIO timing.
452503f9
AC
2646 */
2647
fd3367af 2648 if (speed > XFER_PIO_6) {
452503f9
AC
2649 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2650 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2651 }
2652
2653 /*
c893a3ae 2654 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2655 */
2656
2657 if (t->act8b + t->rec8b < t->cyc8b) {
2658 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2659 t->rec8b = t->cyc8b - t->act8b;
2660 }
2661
2662 if (t->active + t->recover < t->cycle) {
2663 t->active += (t->cycle - (t->active + t->recover)) / 2;
2664 t->recover = t->cycle - t->active;
2665 }
a617c09f 2666
4f701d1e
AC
2667 /* In a few cases quantisation may produce enough errors to
2668 leave t->cycle too low for the sum of active and recovery;
2669 if so, we must correct this */
2670 if (t->active + t->recover > t->cycle)
2671 t->cycle = t->active + t->recover;
452503f9
AC
2672
2673 return 0;
2674}
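/* Hedged sketch of a driver-side caller: roughly how a controller's
 * set_piomode hook might use ata_timing_compute(). The 30303 ps clock
 * period and the register-programming step are assumptions.
 */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_timing t;

	/* T and UT are bus clock periods in picoseconds */
	if (ata_timing_compute(adev, adev->pio_mode, &t, 30303, 1000))
		return;	/* mode not found in ata_timing[] */

	/* program t.setup / t.active / t.recover into the chip here;
	 * the register layout is controller specific */
}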
2675
cf176e1a
TH
2676/**
2677 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2678 * @dev: Device to adjust xfer masks
458337db 2679 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2680 *
2681 * Adjust xfer masks of @dev downward. Note that this function
2682 * does not apply the change. Invoking ata_set_mode() afterwards
2683 * will apply the limit.
2684 *
2685 * LOCKING:
2686 * Inherited from caller.
2687 *
2688 * RETURNS:
2689 * 0 on success, negative errno on failure
2690 */
458337db 2691int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2692{
458337db
TH
2693 char buf[32];
2694 unsigned int orig_mask, xfer_mask;
2695 unsigned int pio_mask, mwdma_mask, udma_mask;
2696 int quiet, highbit;
cf176e1a 2697
458337db
TH
2698 quiet = !!(sel & ATA_DNXFER_QUIET);
2699 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2700
458337db
TH
2701 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2702 dev->mwdma_mask,
2703 dev->udma_mask);
2704 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2705
458337db
TH
2706 switch (sel) {
2707 case ATA_DNXFER_PIO:
2708 highbit = fls(pio_mask) - 1;
2709 pio_mask &= ~(1 << highbit);
2710 break;
2711
2712 case ATA_DNXFER_DMA:
2713 if (udma_mask) {
2714 highbit = fls(udma_mask) - 1;
2715 udma_mask &= ~(1 << highbit);
2716 if (!udma_mask)
2717 return -ENOENT;
2718 } else if (mwdma_mask) {
2719 highbit = fls(mwdma_mask) - 1;
2720 mwdma_mask &= ~(1 << highbit);
2721 if (!mwdma_mask)
2722 return -ENOENT;
2723 }
2724 break;
2725
2726 case ATA_DNXFER_40C:
2727 udma_mask &= ATA_UDMA_MASK_40C;
2728 break;
2729
2730 case ATA_DNXFER_FORCE_PIO0:
2731 pio_mask &= 1;
2732 case ATA_DNXFER_FORCE_PIO:
2733 mwdma_mask = 0;
2734 udma_mask = 0;
2735 break;
2736
458337db
TH
2737 default:
2738 BUG();
2739 }
2740
2741 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2742
2743 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2744 return -ENOENT;
2745
2746 if (!quiet) {
2747 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2748 snprintf(buf, sizeof(buf), "%s:%s",
2749 ata_mode_string(xfer_mask),
2750 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2751 else
2752 snprintf(buf, sizeof(buf), "%s",
2753 ata_mode_string(xfer_mask));
2754
2755 ata_dev_printk(dev, KERN_WARNING,
2756 "limiting speed to %s\n", buf);
2757 }
cf176e1a
TH
2758
2759 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2760 &dev->udma_mask);
2761
cf176e1a 2762 return 0;
cf176e1a
TH
2763}
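/* Illustrative sketch of the fallback pattern used by ata_bus_probe()
 * above: drop the fastest PIO mode, then re-apply transfer modes via
 * ata_set_mode() (defined later in this file). Purely hypothetical.
 */
static void example_slow_down(struct ata_port *ap, struct ata_device *dev)
{
	struct ata_device *failed;

	if (ata_down_xfermask_limit(dev, ATA_DNXFER_PIO) == 0)
		ata_set_mode(ap, &failed);	/* apply the new limit */
}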
2764
3373efd8 2765static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2766{
baa1e78a 2767 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2768 unsigned int err_mask;
2769 int rc;
1da177e4 2770
e8384607 2771 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2772 if (dev->xfer_shift == ATA_SHIFT_PIO)
2773 dev->flags |= ATA_DFLAG_PIO;
2774
3373efd8 2775 err_mask = ata_dev_set_xfermode(dev);
11750a40
AC
2776 /* Old CFA may refuse this command, which is just fine */
2777 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2778 err_mask &= ~AC_ERR_DEV;
2779
83206a29 2780 if (err_mask) {
f15a1daf
TH
2781 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2782 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2783 return -EIO;
2784 }
1da177e4 2785
baa1e78a 2786 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2787 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2788 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2789 if (rc)
83206a29 2790 return rc;
48a8a14f 2791
23e71c3d
TH
2792 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2793 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2794
f15a1daf
TH
2795 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2796 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2797 return 0;
1da177e4
LT
2798}
2799
1da177e4 2800/**
04351821 2801 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
1da177e4 2802 * @ap: port on which timings will be programmed
e82cbdb9 2803 * @r_failed_dev: out parameter for failed device
1da177e4 2804 *
04351821
AC
2805 * Standard implementation of the function used to tune and set
2806 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2807 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2808 * returned in @r_failed_dev.
780a87f7 2809 *
1da177e4 2810 * LOCKING:
0cba632b 2811 * PCI/etc. bus probe sem.
e82cbdb9
TH
2812 *
2813 * RETURNS:
2814 * 0 on success, negative errno otherwise
1da177e4 2815 */
04351821
AC
2816
2817int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2818{
e8e0619f 2819 struct ata_device *dev;
e82cbdb9 2820 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2821
3adcebb2 2822
a6d5a51c
TH
2823 /* step 1: calculate xfer_mask */
2824 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2825 unsigned int pio_mask, dma_mask;
a6d5a51c 2826
e8e0619f
TH
2827 dev = &ap->device[i];
2828
e1211e3f 2829 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2830 continue;
2831
3373efd8 2832 ata_dev_xfermask(dev);
1da177e4 2833
acf356b1
TH
2834 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2835 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2836 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2837 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2838
4f65977d 2839 found = 1;
5444a6f4
AC
2840 if (dev->dma_mode)
2841 used_dma = 1;
a6d5a51c 2842 }
4f65977d 2843 if (!found)
e82cbdb9 2844 goto out;
a6d5a51c
TH
2845
2846 /* step 2: always set host PIO timings */
e8e0619f
TH
2847 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2848 dev = &ap->device[i];
2849 if (!ata_dev_enabled(dev))
2850 continue;
2851
2852 if (!dev->pio_mode) {
f15a1daf 2853 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2854 rc = -EINVAL;
e82cbdb9 2855 goto out;
e8e0619f
TH
2856 }
2857
2858 dev->xfer_mode = dev->pio_mode;
2859 dev->xfer_shift = ATA_SHIFT_PIO;
2860 if (ap->ops->set_piomode)
2861 ap->ops->set_piomode(ap, dev);
2862 }
1da177e4 2863
a6d5a51c 2864 /* step 3: set host DMA timings */
e8e0619f
TH
2865 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2866 dev = &ap->device[i];
2867
2868 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2869 continue;
2870
2871 dev->xfer_mode = dev->dma_mode;
2872 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2873 if (ap->ops->set_dmamode)
2874 ap->ops->set_dmamode(ap, dev);
2875 }
1da177e4
LT
2876
2877 /* step 4: update devices' xfer mode */
83206a29 2878 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2879 dev = &ap->device[i];
1da177e4 2880
18d90deb 2881 /* don't update suspended devices' xfer mode */
9666f400 2882 if (!ata_dev_enabled(dev))
83206a29
TH
2883 continue;
2884
3373efd8 2885 rc = ata_dev_set_mode(dev);
5bbc53f4 2886 if (rc)
e82cbdb9 2887 goto out;
83206a29 2888 }
1da177e4 2889
e8e0619f
TH
2890 /* Record simplex status. If we selected DMA then the other
2891 * host channels are not permitted to do so.
5444a6f4 2892 */
cca3974e 2893 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2894 ap->host->simplex_claimed = ap;
5444a6f4 2895
e82cbdb9
TH
2896 out:
2897 if (rc)
2898 *r_failed_dev = dev;
2899 return rc;
1da177e4
LT
2900}
2901
04351821
AC
2902/**
2903 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2904 * @ap: port on which timings will be programmed
2905 * @r_failed_dev: out parameter for failed device
2906 *
2907 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2908 * ata_set_mode() fails, pointer to the failing device is
2909 * returned in @r_failed_dev.
2910 *
2911 * LOCKING:
2912 * PCI/etc. bus probe sem.
2913 *
2914 * RETURNS:
2915 * 0 on success, negative errno otherwise
2916 */
2917int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2918{
2919 /* has private set_mode? */
2920 if (ap->ops->set_mode)
2921 return ap->ops->set_mode(ap, r_failed_dev);
2922 return ata_do_set_mode(ap, r_failed_dev);
2923}
2924
1fdffbce
JG
2925/**
2926 * ata_tf_to_host - issue ATA taskfile to host controller
2927 * @ap: port to which command is being issued
2928 * @tf: ATA taskfile register set
2929 *
2930 * Issues ATA taskfile register set to ATA host controller,
2931 * with proper synchronization with interrupt handler and
2932 * other threads.
2933 *
2934 * LOCKING:
cca3974e 2935 * spin_lock_irqsave(host lock)
1fdffbce
JG
2936 */
2937
2938static inline void ata_tf_to_host(struct ata_port *ap,
2939 const struct ata_taskfile *tf)
2940{
2941 ap->ops->tf_load(ap, tf);
2942 ap->ops->exec_command(ap, tf);
2943}
2944
1da177e4
LT
2945/**
2946 * ata_busy_sleep - sleep until BSY clears, or timeout
2947 * @ap: port containing status register to be polled
2948 * @tmout_pat: impatience timeout
2949 * @tmout: overall timeout
2950 *
780a87f7
JG
2951 * Sleep until ATA Status register bit BSY clears,
2952 * or a timeout occurs.
2953 *
d1adc1bb
TH
2954 * LOCKING:
2955 * Kernel thread context (may sleep).
2956 *
2957 * RETURNS:
2958 * 0 on success, -errno otherwise.
1da177e4 2959 */
d1adc1bb
TH
2960int ata_busy_sleep(struct ata_port *ap,
2961 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2962{
2963 unsigned long timer_start, timeout;
2964 u8 status;
2965
2966 status = ata_busy_wait(ap, ATA_BUSY, 300);
2967 timer_start = jiffies;
2968 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2969 while (status != 0xff && (status & ATA_BUSY) &&
2970 time_before(jiffies, timeout)) {
1da177e4
LT
2971 msleep(50);
2972 status = ata_busy_wait(ap, ATA_BUSY, 3);
2973 }
2974
d1adc1bb 2975 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2976 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2977 "port is slow to respond, please be patient "
2978 "(Status 0x%x)\n", status);
1da177e4
LT
2979
2980 timeout = timer_start + tmout;
d1adc1bb
TH
2981 while (status != 0xff && (status & ATA_BUSY) &&
2982 time_before(jiffies, timeout)) {
1da177e4
LT
2983 msleep(50);
2984 status = ata_chk_status(ap);
2985 }
2986
d1adc1bb
TH
2987 if (status == 0xff)
2988 return -ENODEV;
2989
1da177e4 2990 if (status & ATA_BUSY) {
f15a1daf 2991 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2992 "(%lu secs, Status 0x%x)\n",
2993 tmout / HZ, status);
d1adc1bb 2994 return -EBUSY;
1da177e4
LT
2995 }
2996
2997 return 0;
2998}
2999
d4b2bab4
TH
3000/**
3001 * ata_wait_ready - sleep until BSY clears, or timeout
3002 * @ap: port containing status register to be polled
3003 * @deadline: deadline jiffies for the operation
3004 *
3005 * Sleep until ATA Status register bit BSY clears, or timeout
3006 * occurs.
3007 *
3008 * LOCKING:
3009 * Kernel thread context (may sleep).
3010 *
3011 * RETURNS:
3012 * 0 on success, -errno otherwise.
3013 */
3014int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3015{
3016 unsigned long start = jiffies;
3017 int warned = 0;
3018
3019 while (1) {
3020 u8 status = ata_chk_status(ap);
3021 unsigned long now = jiffies;
3022
3023 if (!(status & ATA_BUSY))
3024 return 0;
fd7fe701 3025 if (!ata_port_online(ap) && status == 0xff)
d4b2bab4
TH
3026 return -ENODEV;
3027 if (time_after(now, deadline))
3028 return -EBUSY;
3029
3030 if (!warned && time_after(now, start + 5 * HZ) &&
3031 (deadline - now > 3 * HZ)) {
3032 ata_port_printk(ap, KERN_WARNING,
3033 "port is slow to respond, please be patient "
3034 "(Status 0x%x)\n", status);
3035 warned = 1;
3036 }
3037
3038 msleep(50);
3039 }
3040}
3041
3042static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3043 unsigned long deadline)
1da177e4
LT
3044{
3045 struct ata_ioports *ioaddr = &ap->ioaddr;
3046 unsigned int dev0 = devmask & (1 << 0);
3047 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3048 int rc, ret = 0;
1da177e4
LT
3049
3050 /* if device 0 was found in ata_devchk, wait for its
3051 * BSY bit to clear
3052 */
d4b2bab4
TH
3053 if (dev0) {
3054 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3055 if (rc) {
3056 if (rc != -ENODEV)
3057 return rc;
3058 ret = rc;
3059 }
d4b2bab4 3060 }
1da177e4
LT
3061
3062 /* if device 1 was found in ata_devchk, wait for
3063 * register access, then wait for BSY to clear
3064 */
1da177e4
LT
3065 while (dev1) {
3066 u8 nsect, lbal;
3067
3068 ap->ops->dev_select(ap, 1);
0d5ff566
TH
3069 nsect = ioread8(ioaddr->nsect_addr);
3070 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
3071 if ((nsect == 1) && (lbal == 1))
3072 break;
d4b2bab4
TH
3073 if (time_after(jiffies, deadline))
3074 return -EBUSY;
1da177e4
LT
3075 msleep(50); /* give drive a breather */
3076 }
d4b2bab4
TH
3077 if (dev1) {
3078 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3079 if (rc) {
3080 if (rc != -ENODEV)
3081 return rc;
3082 ret = rc;
3083 }
d4b2bab4 3084 }
1da177e4
LT
3085
3086 /* is all this really necessary? */
3087 ap->ops->dev_select(ap, 0);
3088 if (dev1)
3089 ap->ops->dev_select(ap, 1);
3090 if (dev0)
3091 ap->ops->dev_select(ap, 0);
d4b2bab4 3092
9b89391c 3093 return ret;
1da177e4
LT
3094}
3095
d4b2bab4
TH
3096static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3097 unsigned long deadline)
1da177e4
LT
3098{
3099 struct ata_ioports *ioaddr = &ap->ioaddr;
3100
44877b4e 3101 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3102
3103 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3104 iowrite8(ap->ctl, ioaddr->ctl_addr);
3105 udelay(20); /* FIXME: flush */
3106 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3107 udelay(20); /* FIXME: flush */
3108 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3109
3110 /* spec mandates ">= 2ms" before checking status.
3111 * We wait 150ms, because that was the magic delay used for
3112 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3113 * between when the ATA command register is written, and then
3114 * status is checked. Because waiting for "a while" before
3115 * checking status is fine, post SRST, we perform this magic
3116 * delay here as well.
09c7ad79
AC
3117 *
3118 * Old drivers/ide uses the 2 ms rule and then waits for ready
1da177e4
LT
3119 */
3120 msleep(150);
3121
2e9edbf8 3122 /* Before we perform post reset processing we want to see if
298a41ca
TH
3123 * the bus shows 0xFF because the odd clown forgets the D7
3124 * pulldown resistor.
3125 */
d1adc1bb 3126 if (ata_check_status(ap) == 0xFF)
9b89391c 3127 return -ENODEV;
09c7ad79 3128
d4b2bab4 3129 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3130}
3131
3132/**
3133 * ata_bus_reset - reset host port and associated ATA channel
3134 * @ap: port to reset
3135 *
3136 * This is typically the first time we actually start issuing
3137 * commands to the ATA channel. We wait for BSY to clear, then
3138 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3139 * result. Determine what devices, if any, are on the channel
3140 * by looking at the device 0/1 error register. Look at the signature
3141 * stored in each device's taskfile registers, to determine if
3142 * the device is ATA or ATAPI.
3143 *
3144 * LOCKING:
0cba632b 3145 * PCI/etc. bus probe sem.
cca3974e 3146 * Obtains host lock.
1da177e4
LT
3147 *
3148 * SIDE EFFECTS:
198e0fed 3149 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3150 */
3151
3152void ata_bus_reset(struct ata_port *ap)
3153{
3154 struct ata_ioports *ioaddr = &ap->ioaddr;
3155 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3156 u8 err;
aec5c3c1 3157 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3158 int rc;
1da177e4 3159
44877b4e 3160 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3161
3162 /* determine if device 0/1 are present */
3163 if (ap->flags & ATA_FLAG_SATA_RESET)
3164 dev0 = 1;
3165 else {
3166 dev0 = ata_devchk(ap, 0);
3167 if (slave_possible)
3168 dev1 = ata_devchk(ap, 1);
3169 }
3170
3171 if (dev0)
3172 devmask |= (1 << 0);
3173 if (dev1)
3174 devmask |= (1 << 1);
3175
3176 /* select device 0 again */
3177 ap->ops->dev_select(ap, 0);
3178
3179 /* issue bus reset */
9b89391c
TH
3180 if (ap->flags & ATA_FLAG_SRST) {
3181 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3182 if (rc && rc != -ENODEV)
aec5c3c1 3183 goto err_out;
9b89391c 3184 }
1da177e4
LT
3185
3186 /*
3187 * determine by signature whether we have ATA or ATAPI devices
3188 */
b4dc7623 3189 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 3190 if ((slave_possible) && (err != 0x81))
b4dc7623 3191 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
3192
3193 /* re-enable interrupts */
83625006 3194 ap->ops->irq_on(ap);
1da177e4
LT
3195
3196 /* is double-select really necessary? */
3197 if (ap->device[1].class != ATA_DEV_NONE)
3198 ap->ops->dev_select(ap, 1);
3199 if (ap->device[0].class != ATA_DEV_NONE)
3200 ap->ops->dev_select(ap, 0);
3201
3202 /* if no devices were detected, disable this port */
3203 if ((ap->device[0].class == ATA_DEV_NONE) &&
3204 (ap->device[1].class == ATA_DEV_NONE))
3205 goto err_out;
3206
3207 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3208 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3209 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3210 }
3211
3212 DPRINTK("EXIT\n");
3213 return;
3214
3215err_out:
f15a1daf 3216 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
3217 ap->ops->port_disable(ap);
3218
3219 DPRINTK("EXIT\n");
3220}
3221
d7bb4cc7
TH
3222/**
3223 * sata_phy_debounce - debounce SATA phy status
3224 * @ap: ATA port to debounce SATA phy status for
3225 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3226 * @deadline: deadline jiffies for the operation
d7bb4cc7
TH
3227 *
3228 * Make sure SStatus of @ap reaches stable state, determined by
3229 * holding the same value where DET is not 1 for @duration polled
3230 * every @interval, before @timeout. Timeout constrains the
d4b2bab4
TH
3231 * beginning of the stable state. Because DET gets stuck at 1 on
3232 * some controllers after hot unplugging, this function waits
d7bb4cc7
TH
3233 * until timeout then returns 0 if DET is stable at 1.
3234 *
d4b2bab4
TH
3235 * @timeout is further limited by @deadline. The sooner of the
3236 * two is used.
3237 *
d7bb4cc7
TH
3238 * LOCKING:
3239 * Kernel thread context (may sleep)
3240 *
3241 * RETURNS:
3242 * 0 on success, -errno on failure.
3243 */
d4b2bab4
TH
3244int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3245 unsigned long deadline)
7a7921e8 3246{
d7bb4cc7 3247 unsigned long interval_msec = params[0];
d4b2bab4
TH
3248 unsigned long duration = msecs_to_jiffies(params[1]);
3249 unsigned long last_jiffies, t;
d7bb4cc7
TH
3250 u32 last, cur;
3251 int rc;
3252
d4b2bab4
TH
3253 t = jiffies + msecs_to_jiffies(params[2]);
3254 if (time_before(t, deadline))
3255 deadline = t;
3256
d7bb4cc7
TH
3257 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3258 return rc;
3259 cur &= 0xf;
3260
3261 last = cur;
3262 last_jiffies = jiffies;
3263
3264 while (1) {
3265 msleep(interval_msec);
3266 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3267 return rc;
3268 cur &= 0xf;
3269
3270 /* DET stable? */
3271 if (cur == last) {
d4b2bab4 3272 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3273 continue;
3274 if (time_after(jiffies, last_jiffies + duration))
3275 return 0;
3276 continue;
3277 }
3278
3279 /* unstable, start over */
3280 last = cur;
3281 last_jiffies = jiffies;
3282
d4b2bab4
TH
3283 /* check deadline */
3284 if (time_after(jiffies, deadline))
d7bb4cc7
TH
3285 return -EBUSY;
3286 }
3287}
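/* Hedged usage sketch: callers pass one of the sata_deb_timing_*
 * parameter triplets together with an absolute deadline, e.g. five
 * seconds from now. The wrapper name is made up.
 */
static int example_debounce(struct ata_port *ap)
{
	return sata_phy_debounce(ap, sata_deb_timing_normal,
				 jiffies + 5 * HZ);
}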
3288
3289/**
3290 * sata_phy_resume - resume SATA phy
3291 * @ap: ATA port to resume SATA phy for
3292 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3293 * @deadline: deadline jiffies for the operation
d7bb4cc7
TH
3294 *
3295 * Resume SATA phy of @ap and debounce it.
3296 *
3297 * LOCKING:
3298 * Kernel thread context (may sleep)
3299 *
3300 * RETURNS:
3301 * 0 on success, -errno on failure.
3302 */
d4b2bab4
TH
3303int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3304 unsigned long deadline)
d7bb4cc7
TH
3305{
3306 u32 scontrol;
81952c54
TH
3307 int rc;
3308
3309 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3310 return rc;
7a7921e8 3311
852ee16a 3312 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
3313
3314 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3315 return rc;
7a7921e8 3316
d7bb4cc7
TH
3317 /* Some PHYs react badly if SStatus is pounded immediately
3318 * after resuming. Delay 200ms before debouncing.
3319 */
3320 msleep(200);
7a7921e8 3321
d4b2bab4 3322 return sata_phy_debounce(ap, params, deadline);
7a7921e8
TH
3323}
3324
f5914a46
TH
3325/**
3326 * ata_std_prereset - prepare for reset
3327 * @ap: ATA port to be reset
d4b2bab4 3328 * @deadline: deadline jiffies for the operation
f5914a46 3329 *
b8cffc6a
TH
3330 * @ap is about to be reset. Initialize it. Failure from
3331 * prereset makes libata abort the whole reset sequence and give up
3332 * that port, so prereset should be best-effort. It does its
3333 * best to prepare for the reset sequence, but if things go wrong, it
3334 * should just whine, not fail.
f5914a46
TH
3335 *
3336 * LOCKING:
3337 * Kernel thread context (may sleep)
3338 *
3339 * RETURNS:
3340 * 0 on success, -errno otherwise.
3341 */
d4b2bab4 3342int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
f5914a46
TH
3343{
3344 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 3345 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3346 int rc;
3347
31daabda 3348 /* handle link resume */
28324304
TH
3349 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3350 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3351 ehc->i.action |= ATA_EH_HARDRESET;
3352
f5914a46
TH
3353 /* if we're about to do hardreset, nothing more to do */
3354 if (ehc->i.action & ATA_EH_HARDRESET)
3355 return 0;
3356
3357 /* if SATA, resume phy */
3358 if (ap->cbl == ATA_CBL_SATA) {
d4b2bab4 3359 rc = sata_phy_resume(ap, timing, deadline);
b8cffc6a
TH
3360 /* whine about phy resume failure but proceed */
3361 if (rc && rc != -EOPNOTSUPP)
f5914a46
TH
3362 ata_port_printk(ap, KERN_WARNING, "failed to resume "
3363 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3364 }
3365
3366 /* Wait for !BSY if the controller can wait for the first D2H
3367 * Reg FIS and we don't know that no device is attached.
3368 */
b8cffc6a
TH
3369 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3370 rc = ata_wait_ready(ap, deadline);
6dffaf61 3371 if (rc && rc != -ENODEV) {
b8cffc6a
TH
3372 ata_port_printk(ap, KERN_WARNING, "device not ready "
3373 "(errno=%d), forcing hardreset\n", rc);
3374 ehc->i.action |= ATA_EH_HARDRESET;
3375 }
3376 }
f5914a46
TH
3377
3378 return 0;
3379}
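/* Hypothetical sketch: drivers commonly layer controller-specific
 * quirk handling on top of the standard helper and fall through.
 */
static int example_prereset(struct ata_port *ap, unsigned long deadline)
{
	/* controller-specific setup would go here */
	return ata_std_prereset(ap, deadline);
}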
3380
c2bd5804
TH
3381/**
3382 * ata_std_softreset - reset host port via ATA SRST
3383 * @ap: port to reset
c2bd5804 3384 * @classes: resulting classes of attached devices
d4b2bab4 3385 * @deadline: deadline jiffies for the operation
c2bd5804 3386 *
52783c5d 3387 * Reset host port using ATA SRST.
c2bd5804
TH
3388 *
3389 * LOCKING:
3390 * Kernel thread context (may sleep)
3391 *
3392 * RETURNS:
3393 * 0 on success, -errno otherwise.
3394 */
d4b2bab4
TH
3395int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3396 unsigned long deadline)
c2bd5804
TH
3397{
3398 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3399 unsigned int devmask = 0;
3400 int rc;
c2bd5804
TH
3401 u8 err;
3402
3403 DPRINTK("ENTER\n");
3404
81952c54 3405 if (ata_port_offline(ap)) {
3a39746a
TH
3406 classes[0] = ATA_DEV_NONE;
3407 goto out;
3408 }
3409
c2bd5804
TH
3410 /* determine if device 0/1 are present */
3411 if (ata_devchk(ap, 0))
3412 devmask |= (1 << 0);
3413 if (slave_possible && ata_devchk(ap, 1))
3414 devmask |= (1 << 1);
3415
c2bd5804
TH
3416 /* select device 0 again */
3417 ap->ops->dev_select(ap, 0);
3418
3419 /* issue bus reset */
3420 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3421 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c
TH
3422 /* if link is occupied, -ENODEV too is an error */
3423 if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
d4b2bab4
TH
3424 ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3425 return rc;
c2bd5804
TH
3426 }
3427
3428 /* determine by signature whether we have ATA or ATAPI devices */
3429 classes[0] = ata_dev_try_classify(ap, 0, &err);
3430 if (slave_possible && err != 0x81)
3431 classes[1] = ata_dev_try_classify(ap, 1, &err);
3432
3a39746a 3433 out:
c2bd5804
TH
3434 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3435 return 0;
3436}
3437
3438/**
b6103f6d 3439 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3440 * @ap: port to reset
b6103f6d 3441 * @timing: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3442 * @deadline: deadline jiffies for the operation
c2bd5804
TH
3443 *
3444 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3445 *
3446 * LOCKING:
3447 * Kernel thread context (may sleep)
3448 *
3449 * RETURNS:
3450 * 0 on success, -errno otherwise.
3451 */
d4b2bab4
TH
3452int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3453 unsigned long deadline)
c2bd5804 3454{
852ee16a 3455 u32 scontrol;
81952c54 3456 int rc;
852ee16a 3457
c2bd5804
TH
3458 DPRINTK("ENTER\n");
3459
3c567b7d 3460 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3461 /* SATA spec says nothing about how to reconfigure
3462 * spd. To be on the safe side, turn off phy during
3463 * reconfiguration. This works for at least ICH7 AHCI
3464 * and Sil3124.
3465 */
81952c54 3466 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3467 goto out;
81952c54 3468
a34b6fc0 3469 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3470
3471 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3472 goto out;
1c3fae4d 3473
3c567b7d 3474 sata_set_spd(ap);
1c3fae4d
TH
3475 }
3476
3477 /* issue phy wake/reset */
81952c54 3478 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3479 goto out;
81952c54 3480
852ee16a 3481 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3482
3483 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3484 goto out;
c2bd5804 3485
1c3fae4d 3486 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3487 * 10.4.2 says at least 1 ms.
3488 */
3489 msleep(1);
3490
1c3fae4d 3491 /* bring phy back */
d4b2bab4 3492 rc = sata_phy_resume(ap, timing, deadline);
b6103f6d
TH
3493 out:
3494 DPRINTK("EXIT, rc=%d\n", rc);
3495 return rc;
3496}
3497
3498/**
3499 * sata_std_hardreset - reset host port via SATA phy reset
3500 * @ap: port to reset
3501 * @class: resulting class of attached device
d4b2bab4 3502 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3503 *
3504 * SATA phy-reset host port using DET bits of SControl register,
3505 * wait for !BSY and classify the attached device.
3506 *
3507 * LOCKING:
3508 * Kernel thread context (may sleep)
3509 *
3510 * RETURNS:
3511 * 0 on success, -errno otherwise.
3512 */
d4b2bab4
TH
3513int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3514 unsigned long deadline)
b6103f6d
TH
3515{
3516 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3517 int rc;
3518
3519 DPRINTK("ENTER\n");
3520
3521 /* do hardreset */
d4b2bab4 3522 rc = sata_port_hardreset(ap, timing, deadline);
b6103f6d
TH
3523 if (rc) {
3524 ata_port_printk(ap, KERN_ERR,
3525 "COMRESET failed (errno=%d)\n", rc);
3526 return rc;
3527 }
c2bd5804 3528
c2bd5804 3529 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3530 if (ata_port_offline(ap)) {
c2bd5804
TH
3531 *class = ATA_DEV_NONE;
3532 DPRINTK("EXIT, link offline\n");
3533 return 0;
3534 }
3535
34fee227
TH
3536 /* wait a while before checking status, see SRST for more info */
3537 msleep(150);
3538
d4b2bab4 3539 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3540 /* link occupied, -ENODEV too is an error */
3541 if (rc) {
f15a1daf 3542 ata_port_printk(ap, KERN_ERR,
d4b2bab4
TH
3543 "COMRESET failed (errno=%d)\n", rc);
3544 return rc;
c2bd5804
TH
3545 }
3546
3a39746a
TH
3547 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3548
c2bd5804
TH
3549 *class = ata_dev_try_classify(ap, 0, NULL);
3550
3551 DPRINTK("EXIT, class=%u\n", *class);
3552 return 0;
3553}
3554
3555/**
3556 * ata_std_postreset - standard postreset callback
3557 * @ap: the target ata_port
3558 * @classes: classes of attached devices
3559 *
3560 * This function is invoked after a successful reset. Note that
3561 * the device might have been reset more than once using
3562 * different reset methods before postreset is invoked.
c2bd5804 3563 *
c2bd5804
TH
3564 * LOCKING:
3565 * Kernel thread context (may sleep)
3566 */
3567void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3568{
dc2b3515
TH
3569 u32 serror;
3570
c2bd5804
TH
3571 DPRINTK("ENTER\n");
3572
c2bd5804 3573 /* print link status */
81952c54 3574 sata_print_link_status(ap);
c2bd5804 3575
dc2b3515
TH
3576 /* clear SError */
3577 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3578 sata_scr_write(ap, SCR_ERROR, serror);
3579
3a39746a 3580 /* re-enable interrupts */
83625006
AI
3581 if (!ap->ops->error_handler)
3582 ap->ops->irq_on(ap);
c2bd5804
TH
3583
3584 /* is double-select really necessary? */
3585 if (classes[0] != ATA_DEV_NONE)
3586 ap->ops->dev_select(ap, 1);
3587 if (classes[1] != ATA_DEV_NONE)
3588 ap->ops->dev_select(ap, 0);
3589
3a39746a
TH
3590 /* bail out if no device is present */
3591 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3592 DPRINTK("EXIT, no device\n");
3593 return;
3594 }
3595
3596 /* set up device control */
0d5ff566
TH
3597 if (ap->ioaddr.ctl_addr)
3598 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3599
3600 DPRINTK("EXIT\n");
3601}
3602
623a3128
TH
3603/**
3604 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3605 * @dev: device to compare against
3606 * @new_class: class of the new device
3607 * @new_id: IDENTIFY page of the new device
3608 *
3609 * Compare @new_class and @new_id against @dev and determine
3610 * whether @dev is the device indicated by @new_class and
3611 * @new_id.
3612 *
3613 * LOCKING:
3614 * None.
3615 *
3616 * RETURNS:
3617 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3618 */
3373efd8
TH
3619static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3620 const u16 *new_id)
623a3128
TH
3621{
3622 const u16 *old_id = dev->id;
a0cf733b
TH
3623 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3624 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3625
3626 if (dev->class != new_class) {
f15a1daf
TH
3627 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3628 dev->class, new_class);
623a3128
TH
3629 return 0;
3630 }
3631
a0cf733b
TH
3632 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3633 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3634 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3635 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3636
3637 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3638 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3639 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3640 return 0;
3641 }
3642
3643 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3644 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3645 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3646 return 0;
3647 }
3648
623a3128
TH
3649 return 1;
3650}
3651
3652/**
fe30911b
TH
3653 * ata_dev_reread_id - Re-read IDENTIFY data
3654 * @dev: target ATA device
bff04647 3655 * @readid_flags: read ID flags
623a3128
TH
3656 *
3657 * Re-read IDENTIFY page and make sure @dev is still attached to
3658 * the port.
3659 *
3660 * LOCKING:
3661 * Kernel thread context (may sleep)
3662 *
3663 * RETURNS:
3664 * 0 on success, negative errno otherwise
3665 */
fe30911b 3666int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3667{
5eb45c02 3668 unsigned int class = dev->class;
f15a1daf 3669 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3670 int rc;
3671
fe635c7e 3672 /* read ID data */
bff04647 3673 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3674 if (rc)
fe30911b 3675 return rc;
623a3128
TH
3676
3677 /* is the device still there? */
fe30911b
TH
3678 if (!ata_dev_same_device(dev, class, id))
3679 return -ENODEV;
623a3128 3680
fe635c7e 3681 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3682 return 0;
3683}
3684
3685/**
3686 * ata_dev_revalidate - Revalidate ATA device
3687 * @dev: device to revalidate
3688 * @readid_flags: read ID flags
3689 *
3690 * Re-read IDENTIFY page, make sure @dev is still attached to the
3691 * port and reconfigure it according to the new IDENTIFY page.
3692 *
3693 * LOCKING:
3694 * Kernel thread context (may sleep)
3695 *
3696 * RETURNS:
3697 * 0 on success, negative errno otherwise
3698 */
3699int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
3700{
6ddcd3b0 3701 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3702 int rc;
3703
3704 if (!ata_dev_enabled(dev))
3705 return -ENODEV;
3706
3707 /* re-read ID */
3708 rc = ata_dev_reread_id(dev, readid_flags);
3709 if (rc)
3710 goto fail;
623a3128
TH
3711
3712 /* configure device according to the new ID */
efdaedc4 3713 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3714 if (rc)
3715 goto fail;
3716
3717 /* verify n_sectors hasn't changed */
3718 if (dev->class == ATA_DEV_ATA && dev->n_sectors != n_sectors) {
3719 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3720 "%llu != %llu\n",
3721 (unsigned long long)n_sectors,
3722 (unsigned long long)dev->n_sectors);
3723 rc = -ENODEV;
3724 goto fail;
3725 }
3726
3727 return 0;
623a3128
TH
3728
3729 fail:
f15a1daf 3730 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3731 return rc;
3732}
3733
6919a0a6
AC
3734struct ata_blacklist_entry {
3735 const char *model_num;
3736 const char *model_rev;
3737 unsigned long horkage;
3738};
3739
3740static const struct ata_blacklist_entry ata_device_blacklist [] = {
3741 /* Devices with DMA related problems under Linux */
3742 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3743 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3744 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3745 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3746 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3747 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3748 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3749 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3750 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3751 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3752 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3753 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3754 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3755 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3756 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3757 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3758 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3759 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3760 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3761 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3762 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3763 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3764 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3765 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3766 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3767 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3768 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3769 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3770 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
39f19886 3771 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
6919a0a6 3772
18d6e9d5 3773 /* Weird ATAPI devices */
6f23a31d
AL
3774 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
3775 ATA_HORKAGE_DMA_RW_ONLY },
18d6e9d5 3776
6919a0a6
AC
3777 /* Devices we expect to fail diagnostics */
3778
3779 /* Devices where NCQ should be avoided */
3780 /* NCQ is slow */
3781 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3782 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3783 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30
PR
3784 /* NCQ is broken */
3785 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
471e44b2 3786 { "Maxtor 6B200M0", "BANC1B10", ATA_HORKAGE_NONCQ },
96442925
JA
3787 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3788 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
36e337d0
RH
3789 /* Blacklist entries taken from Silicon Image 3124/3132
3790 Windows driver .inf file - also several Linux problem reports */
3791 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3792 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3793 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6
AC
3794
3795 /* Devices with NCQ limits */
3796
3797 /* End Marker */
3798 { }
1da177e4 3799};
2e9edbf8 3800
6919a0a6 3801unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3802{
8bfa79fc
TH
3803 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3804 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3805 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3806
8bfa79fc
TH
3807 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3808 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3809
6919a0a6 3810 while (ad->model_num) {
8bfa79fc 3811 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3812 if (ad->model_rev == NULL)
3813 return ad->horkage;
8bfa79fc 3814 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3815 return ad->horkage;
f4b15fef 3816 }
6919a0a6 3817 ad++;
f4b15fef 3818 }
1da177e4
LT
3819 return 0;
3820}
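
/* Minimal usage sketch (hypothetical caller): the horkage word returned
 * above is meant to be tested bit by bit, exactly as
 * ata_dma_blacklisted() below does for ATA_HORKAGE_NODMA. An NCQ-side
 * check would look like:
 */
static int example_ncq_blacklisted(const struct ata_device *dev)
{
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) ? 1 : 0;
}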
3821
6919a0a6
AC
3822static int ata_dma_blacklisted(const struct ata_device *dev)
3823{
3824 /* We don't support polling DMA.
3825 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3826 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3827 */
3828 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3829 (dev->flags & ATA_DFLAG_CDB_INTR))
3830 return 1;
3831 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3832}
3833
a6d5a51c
TH
3834/**
3835 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3836 * @dev: Device to compute xfermask for
3837 *
acf356b1
TH
3838 * Compute supported xfermask of @dev and store it in
3839 * dev->*_mask. This function is responsible for applying all
3840 * known limits including host controller limits, device
3841 * blacklist, etc...
a6d5a51c
TH
3842 *
3843 * LOCKING:
3844 * None.
a6d5a51c 3845 */
3373efd8 3846static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3847{
3373efd8 3848 struct ata_port *ap = dev->ap;
cca3974e 3849 struct ata_host *host = ap->host;
a6d5a51c 3850 unsigned long xfer_mask;
1da177e4 3851
37deecb5 3852 /* controller modes available */
565083e1
TH
3853 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3854 ap->mwdma_mask, ap->udma_mask);
3855
8343f889 3856 /* drive modes available */
37deecb5
TH
3857 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3858 dev->mwdma_mask, dev->udma_mask);
3859 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3860
b352e57d
AC
3861 /*
3862 * CFA Advanced TrueIDE timings are not allowed on a shared
3863 * cable
3864 */
3865 if (ata_dev_pair(dev)) {
3866 /* No PIO5 or PIO6 */
3867 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3868 /* No MWDMA3 or MWDMA 4 */
3869 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3870 }
3871
37deecb5
TH
3872 if (ata_dma_blacklisted(dev)) {
3873 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3874 ata_dev_printk(dev, KERN_WARNING,
3875 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3876 }
a6d5a51c 3877
14d66ab7
PV
3878 if ((host->flags & ATA_HOST_SIMPLEX) &&
3879 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3880 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3881 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3882 "other device, disabling DMA\n");
5444a6f4 3883 }
565083e1 3884
e424675f
JG
3885 if (ap->flags & ATA_FLAG_NO_IORDY)
3886 xfer_mask &= ata_pio_mask_no_iordy(dev);
3887
5444a6f4 3888 if (ap->ops->mode_filter)
a76b62ca 3889 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 3890
8343f889
RH
3891 /* Apply cable rule here. Don't apply it early because when
3892 * we handle hot plug the cable type can itself change.
3893 * Check this last so that we know if the transfer rate was
3894 * solely limited by the cable.
3895 * Unknown or 80 wire cables reported host side are checked
3896 * drive side as well. Cases where we know a 40-wire cable
3897 * is used safely for 80 are not checked here.
3898 */
3899 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3900 /* UDMA/44 or higher would be available */
3901 if ((ap->cbl == ATA_CBL_PATA40) ||
3902 (ata_drive_40wire(dev->id) &&
3903 (ap->cbl == ATA_CBL_PATA_UNK ||
3904 ap->cbl == ATA_CBL_PATA80))) {
3905 ata_dev_printk(dev, KERN_WARNING,
3906 "limited to UDMA/33 due to 40-wire cable\n");
3907 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3908 }
3909
565083e1
TH
3910 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3911 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3912}
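
/* Minimal sketch, assuming the ata_pack_xfermask()/ata_unpack_xfermask()
 * helpers used above: packing puts the PIO, MWDMA and UDMA masks into
 * disjoint bit ranges of one word, so each limit applied in
 * ata_dev_xfermask() is a single mask operation. E.g. forcing a device
 * down to PIO only:
 */
static void example_limit_to_pio(struct ata_device *dev)
{
	unsigned long xfer_mask = ata_pack_xfermask(dev->pio_mask,
						    dev->mwdma_mask,
						    dev->udma_mask);

	xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}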
3913
1da177e4
LT
3914/**
3915 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3916 * @dev: Device to which command will be sent
3917 *
780a87f7
JG
3918 * Issue SET FEATURES - XFER MODE command to device @dev
3919 * on its port.
3920 *
1da177e4 3921 * LOCKING:
0cba632b 3922 * PCI/etc. bus probe sem.
83206a29
TH
3923 *
3924 * RETURNS:
3925 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3926 */
3927
3373efd8 3928static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3929{
a0123703 3930 struct ata_taskfile tf;
83206a29 3931 unsigned int err_mask;
1da177e4
LT
3932
3933 /* set up set-features taskfile */
3934 DPRINTK("set features - xfer mode\n");
3935
3373efd8 3936 ata_tf_init(dev, &tf);
a0123703
TH
3937 tf.command = ATA_CMD_SET_FEATURES;
3938 tf.feature = SETFEATURES_XFER;
3939 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3940 tf.protocol = ATA_PROT_NODATA;
3941 tf.nsect = dev->xfer_mode;
1da177e4 3942
3373efd8 3943 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3944
83206a29
TH
3945 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3946 return err_mask;
1da177e4
LT
3947}
3948
8bf62ece
AL
3949/**
3950 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3951 * @dev: Device to which command will be sent
e2a7f77a
RD
3952 * @heads: Number of heads (taskfile parameter)
3953 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3954 *
3955 * LOCKING:
6aff8f1f
TH
3956 * Kernel thread context (may sleep)
3957 *
3958 * RETURNS:
3959 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3960 */
3373efd8
TH
3961static unsigned int ata_dev_init_params(struct ata_device *dev,
3962 u16 heads, u16 sectors)
8bf62ece 3963{
a0123703 3964 struct ata_taskfile tf;
6aff8f1f 3965 unsigned int err_mask;
8bf62ece
AL
3966
3967 /* Number of sectors per track 1-255. Number of heads 1-16 */
3968 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3969 return AC_ERR_INVALID;
8bf62ece
AL
3970
3971 /* set up init dev params taskfile */
3972 DPRINTK("init dev params\n");
3973
3373efd8 3974 ata_tf_init(dev, &tf);
a0123703
TH
3975 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3976 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3977 tf.protocol = ATA_PROT_NODATA;
3978 tf.nsect = sectors;
3979 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3980
3373efd8 3981 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3982
6aff8f1f
TH
3983 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3984 return err_mask;
8bf62ece
AL
3985}
3986
1da177e4 3987/**
0cba632b
JG
3988 * ata_sg_clean - Unmap DMA memory associated with command
3989 * @qc: Command containing DMA memory to be released
3990 *
3991 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3992 *
3993 * LOCKING:
cca3974e 3994 * spin_lock_irqsave(host lock)
1da177e4 3995 */
70e6ad0c 3996void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3997{
3998 struct ata_port *ap = qc->ap;
cedc9a47 3999 struct scatterlist *sg = qc->__sg;
1da177e4 4000 int dir = qc->dma_dir;
cedc9a47 4001 void *pad_buf = NULL;
1da177e4 4002
a4631474
TH
4003 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4004 WARN_ON(sg == NULL);
1da177e4
LT
4005
4006 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4007 WARN_ON(qc->n_elem > 1);
1da177e4 4008
2c13b7ce 4009 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4010
cedc9a47
JG
4011 /* if we padded the buffer out to 32-bit bound, and data
4012 * xfer direction is from-device, we must copy from the
4013 * pad buffer back into the supplied buffer
4014 */
4015 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4016 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4017
4018 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4019 if (qc->n_elem)
2f1f610b 4020 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
4021 /* restore last sg */
4022 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4023 if (pad_buf) {
4024 struct scatterlist *psg = &qc->pad_sgent;
4025 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4026 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4027 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4028 }
4029 } else {
2e242fa9 4030 if (qc->n_elem)
2f1f610b 4031 dma_unmap_single(ap->dev,
e1410f2d
JG
4032 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4033 dir);
cedc9a47
JG
4034 /* restore sg */
4035 sg->length += qc->pad_len;
4036 if (pad_buf)
4037 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4038 pad_buf, qc->pad_len);
4039 }
1da177e4
LT
4040
4041 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4042 qc->__sg = NULL;
1da177e4
LT
4043}
4044
4045/**
4046 * ata_fill_sg - Fill PCI IDE PRD table
4047 * @qc: Metadata associated with taskfile to be transferred
4048 *
780a87f7
JG
4049 * Fill PCI IDE PRD (scatter-gather) table with segments
4050 * associated with the current disk command.
4051 *
1da177e4 4052 * LOCKING:
cca3974e 4053 * spin_lock_irqsave(host lock)
1da177e4
LT
4054 *
4055 */
4056static void ata_fill_sg(struct ata_queued_cmd *qc)
4057{
1da177e4 4058 struct ata_port *ap = qc->ap;
cedc9a47
JG
4059 struct scatterlist *sg;
4060 unsigned int idx;
1da177e4 4061
a4631474 4062 WARN_ON(qc->__sg == NULL);
f131883e 4063 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4064
4065 idx = 0;
cedc9a47 4066 ata_for_each_sg(sg, qc) {
1da177e4
LT
4067 u32 addr, offset;
4068 u32 sg_len, len;
4069
4070 /* determine if physical DMA addr spans 64K boundary.
4071 * Note h/w doesn't support 64-bit, so we unconditionally
4072 * truncate dma_addr_t to u32.
4073 */
4074 addr = (u32) sg_dma_address(sg);
4075 sg_len = sg_dma_len(sg);
4076
4077 while (sg_len) {
4078 offset = addr & 0xffff;
4079 len = sg_len;
4080 if ((offset + sg_len) > 0x10000)
4081 len = 0x10000 - offset;
4082
4083 ap->prd[idx].addr = cpu_to_le32(addr);
4084 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4085 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4086
4087 idx++;
4088 sg_len -= len;
4089 addr += len;
4090 }
4091 }
4092
4093 if (idx)
4094 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4095}
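
/* Worked example (illustrative only) of the 64K splitting done by the
 * loop in ata_fill_sg(): a segment at bus address 0xFF00 with length
 * 0x300 crosses the 0x10000 line and therefore needs two PRD entries,
 * (0xFF00, 0x100) and (0x10000, 0x200). The helper below just counts
 * entries using the same arithmetic as the loop above:
 */
static unsigned int example_prd_entries(u32 addr, u32 sg_len)
{
	unsigned int n = 0;

	while (sg_len) {
		u32 offset = addr & 0xffff;
		u32 len = sg_len;

		if ((offset + sg_len) > 0x10000)
			len = 0x10000 - offset;
		n++;
		addr += len;
		sg_len -= len;
	}
	return n;	/* example_prd_entries(0xFF00, 0x300) == 2 */
}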
4096/**
4097 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4098 * @qc: Metadata associated with taskfile to check
4099 *
780a87f7
JG
4100 * Allow low-level driver to filter ATA PACKET commands, returning
4101 * a status indicating whether or not it is OK to use DMA for the
4102 * supplied PACKET command.
4103 *
1da177e4 4104 * LOCKING:
cca3974e 4105 * spin_lock_irqsave(host lock)
0cba632b 4106 *
1da177e4
LT
4107 * RETURNS: 0 when ATAPI DMA can be used
4108 * nonzero otherwise
4109 */
4110int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4111{
4112 struct ata_port *ap = qc->ap;
4113 int rc = 0; /* Assume ATAPI DMA is OK by default */
4114
6f23a31d
AL
4115 /* some drives can only do ATAPI DMA on read/write */
4116 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
4117 struct scsi_cmnd *cmd = qc->scsicmd;
4118 u8 *scsicmd = cmd->cmnd;
4119
4120 switch (scsicmd[0]) {
4121 case READ_10:
4122 case WRITE_10:
4123 case READ_12:
4124 case WRITE_12:
4125 case READ_6:
4126 case WRITE_6:
4127 /* atapi dma maybe ok */
4128 break;
4129 default:
4130 /* turn off atapi dma */
4131 return 1;
4132 }
4133 }
4134
1da177e4
LT
4135 if (ap->ops->check_atapi_dma)
4136 rc = ap->ops->check_atapi_dma(qc);
4137
4138 return rc;
4139}
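
/* Hypothetical LLD hook sketch: a controller whose DMA engine cannot
 * handle, say, transfers that are not 16-byte multiples would supply a
 * check_atapi_dma callback along these lines; any nonzero return makes
 * libata fall back to PIO for that command. The rule itself is
 * invented for illustration:
 */
static int example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	return (qc->nbytes & 0x0f) ? 1 : 0;
}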
4140/**
4141 * ata_qc_prep - Prepare taskfile for submission
4142 * @qc: Metadata associated with taskfile to be prepared
4143 *
780a87f7
JG
4144 * Prepare ATA taskfile for submission.
4145 *
1da177e4 4146 * LOCKING:
cca3974e 4147 * spin_lock_irqsave(host lock)
1da177e4
LT
4148 */
4149void ata_qc_prep(struct ata_queued_cmd *qc)
4150{
4151 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4152 return;
4153
4154 ata_fill_sg(qc);
4155}
4156
e46834cd
BK
4157void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4158
0cba632b
JG
4159/**
4160 * ata_sg_init_one - Associate command with memory buffer
4161 * @qc: Command to be associated
4162 * @buf: Memory buffer
4163 * @buflen: Length of memory buffer, in bytes.
4164 *
4165 * Initialize the data-related elements of queued_cmd @qc
4166 * to point to a single memory buffer, @buf of byte length @buflen.
4167 *
4168 * LOCKING:
cca3974e 4169 * spin_lock_irqsave(host lock)
0cba632b
JG
4170 */
4171
1da177e4
LT
4172void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4173{
1da177e4
LT
4174 qc->flags |= ATA_QCFLAG_SINGLE;
4175
cedc9a47 4176 qc->__sg = &qc->sgent;
1da177e4 4177 qc->n_elem = 1;
cedc9a47 4178 qc->orig_n_elem = 1;
1da177e4 4179 qc->buf_virt = buf;
233277ca 4180 qc->nbytes = buflen;
1da177e4 4181
61c0596c 4182 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4183}
4184
0cba632b
JG
4185/**
4186 * ata_sg_init - Associate command with scatter-gather table.
4187 * @qc: Command to be associated
4188 * @sg: Scatter-gather table.
4189 * @n_elem: Number of elements in s/g table.
4190 *
4191 * Initialize the data-related elements of queued_cmd @qc
4192 * to point to a scatter-gather table @sg, containing @n_elem
4193 * elements.
4194 *
4195 * LOCKING:
cca3974e 4196 * spin_lock_irqsave(host lock)
0cba632b
JG
4197 */
4198
1da177e4
LT
4199void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4200 unsigned int n_elem)
4201{
4202 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4203 qc->__sg = sg;
1da177e4 4204 qc->n_elem = n_elem;
cedc9a47 4205 qc->orig_n_elem = n_elem;
1da177e4
LT
4206}
4207
4208/**
0cba632b
JG
4209 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4210 * @qc: Command with memory buffer to be mapped.
4211 *
4212 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4213 *
4214 * LOCKING:
cca3974e 4215 * spin_lock_irqsave(host lock)
1da177e4
LT
4216 *
4217 * RETURNS:
0cba632b 4218 * Zero on success, negative on error.
1da177e4
LT
4219 */
4220
4221static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4222{
4223 struct ata_port *ap = qc->ap;
4224 int dir = qc->dma_dir;
cedc9a47 4225 struct scatterlist *sg = qc->__sg;
1da177e4 4226 dma_addr_t dma_address;
2e242fa9 4227 int trim_sg = 0;
1da177e4 4228
cedc9a47
JG
4229 /* we must lengthen transfers to end on a 32-bit boundary */
4230 qc->pad_len = sg->length & 3;
4231 if (qc->pad_len) {
4232 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4233 struct scatterlist *psg = &qc->pad_sgent;
4234
a4631474 4235 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4236
4237 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4238
4239 if (qc->tf.flags & ATA_TFLAG_WRITE)
4240 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4241 qc->pad_len);
4242
4243 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4244 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4245 /* trim sg */
4246 sg->length -= qc->pad_len;
2e242fa9
TH
4247 if (sg->length == 0)
4248 trim_sg = 1;
cedc9a47
JG
4249
4250 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4251 sg->length, qc->pad_len);
4252 }
4253
2e242fa9
TH
4254 if (trim_sg) {
4255 qc->n_elem--;
e1410f2d
JG
4256 goto skip_map;
4257 }
4258
2f1f610b 4259 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4260 sg->length, dir);
537a95d9
TH
4261 if (dma_mapping_error(dma_address)) {
4262 /* restore sg */
4263 sg->length += qc->pad_len;
1da177e4 4264 return -1;
537a95d9 4265 }
1da177e4
LT
4266
4267 sg_dma_address(sg) = dma_address;
32529e01 4268 sg_dma_len(sg) = sg->length;
1da177e4 4269
2e242fa9 4270skip_map:
1da177e4
LT
4271 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4272 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4273
4274 return 0;
4275}
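
/* Worked example (illustrative only) of the padding arithmetic above:
 * pad_len = buflen & 3 is the overhang past a 32-bit boundary. For a
 * 510-byte buffer, the main sg is trimmed to 508 bytes and the 4-byte
 * pad sg carries the last 2 bytes (zero-filled), so the device sees a
 * 512-byte transfer. Assumes ATA_DMA_PAD_SZ (4) as used above:
 */
static unsigned int example_padded_dma_len(unsigned int buflen)
{
	unsigned int pad_len = buflen & 3;

	if (!pad_len)
		return buflen;
	return (buflen - pad_len) + ATA_DMA_PAD_SZ;
}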
4276
4277/**
0cba632b
JG
4278 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4279 * @qc: Command with scatter-gather table to be mapped.
4280 *
4281 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4282 *
4283 * LOCKING:
cca3974e 4284 * spin_lock_irqsave(host lock)
1da177e4
LT
4285 *
4286 * RETURNS:
0cba632b 4287 * Zero on success, negative on error.
1da177e4
LT
4288 *
4289 */
4290
4291static int ata_sg_setup(struct ata_queued_cmd *qc)
4292{
4293 struct ata_port *ap = qc->ap;
cedc9a47
JG
4294 struct scatterlist *sg = qc->__sg;
4295 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 4296 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4297
44877b4e 4298 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4299 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4300
cedc9a47
JG
4301 /* we must lengthen transfers to end on a 32-bit boundary */
4302 qc->pad_len = lsg->length & 3;
4303 if (qc->pad_len) {
4304 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4305 struct scatterlist *psg = &qc->pad_sgent;
4306 unsigned int offset;
4307
a4631474 4308 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4309
4310 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4311
4312 /*
4313 * psg->page/offset are used to copy to-be-written
4314 * data in this function or read data in ata_sg_clean.
4315 */
4316 offset = lsg->offset + lsg->length - qc->pad_len;
4317 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4318 psg->offset = offset_in_page(offset);
4319
4320 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4321 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4322 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4323 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4324 }
4325
4326 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4327 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4328 /* trim last sg */
4329 lsg->length -= qc->pad_len;
e1410f2d
JG
4330 if (lsg->length == 0)
4331 trim_sg = 1;
cedc9a47
JG
4332
4333 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4334 qc->n_elem - 1, lsg->length, qc->pad_len);
4335 }
4336
e1410f2d
JG
4337 pre_n_elem = qc->n_elem;
4338 if (trim_sg && pre_n_elem)
4339 pre_n_elem--;
4340
4341 if (!pre_n_elem) {
4342 n_elem = 0;
4343 goto skip_map;
4344 }
4345
1da177e4 4346 dir = qc->dma_dir;
2f1f610b 4347 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4348 if (n_elem < 1) {
4349 /* restore last sg */
4350 lsg->length += qc->pad_len;
1da177e4 4351 return -1;
537a95d9 4352 }
1da177e4
LT
4353
4354 DPRINTK("%d sg elements mapped\n", n_elem);
4355
e1410f2d 4356skip_map:
1da177e4
LT
4357 qc->n_elem = n_elem;
4358
4359 return 0;
4360}
4361
0baab86b 4362/**
c893a3ae 4363 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4364 * @buf: Buffer to swap
4365 * @buf_words: Number of 16-bit words in buffer.
4366 *
4367 * Swap halves of 16-bit words if needed to convert from
4368 * little-endian byte order to native cpu byte order, or
4369 * vice-versa.
4370 *
4371 * LOCKING:
6f0ef4fa 4372 * Inherited from caller.
0baab86b 4373 */
1da177e4
LT
4374void swap_buf_le16(u16 *buf, unsigned int buf_words)
4375{
4376#ifdef __BIG_ENDIAN
4377 unsigned int i;
4378
4379 for (i = 0; i < buf_words; i++)
4380 buf[i] = le16_to_cpu(buf[i]);
4381#endif /* __BIG_ENDIAN */
4382}
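
/* Minimal usage sketch: IDENTIFY data arrives as 256 little-endian
 * 16-bit words; calling this after the raw read makes id[N] index the
 * words as the spec numbers them on either endianness (it compiles to
 * a no-op on little-endian builds). Assumes ATA_ID_WORDS as used
 * elsewhere in this file:
 */
static void example_fixup_id(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);
}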
4383
6ae4cfb5 4384/**
0d5ff566 4385 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4386 * @adev: device to target
6ae4cfb5
AL
4387 * @buf: data buffer
4388 * @buflen: buffer length
344babaa 4389 * @write_data: read/write
6ae4cfb5
AL
4390 *
4391 * Transfer data from/to the device data register by PIO.
4392 *
4393 * LOCKING:
4394 * Inherited from caller.
6ae4cfb5 4395 */
0d5ff566
TH
4396void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4397 unsigned int buflen, int write_data)
1da177e4 4398{
a6b2c5d4 4399 struct ata_port *ap = adev->ap;
6ae4cfb5 4400 unsigned int words = buflen >> 1;
1da177e4 4401
6ae4cfb5 4402 /* Transfer multiple of 2 bytes */
1da177e4 4403 if (write_data)
0d5ff566 4404 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4405 else
0d5ff566 4406 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4407
4408 /* Transfer trailing 1 byte, if any. */
4409 if (unlikely(buflen & 0x01)) {
4410 u16 align_buf[1] = { 0 };
4411 unsigned char *trailing_buf = buf + buflen - 1;
4412
4413 if (write_data) {
4414 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4415 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4416 } else {
0d5ff566 4417 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4418 memcpy(trailing_buf, align_buf, 1);
4419 }
4420 }
1da177e4
LT
4421}
4422
75e99585 4423/**
0d5ff566 4424 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
4425 * @adev: device to target
4426 * @buf: data buffer
4427 * @buflen: buffer length
4428 * @write_data: read/write
4429 *
88574551 4430 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4431 * transfer with interrupts disabled.
4432 *
4433 * LOCKING:
4434 * Inherited from caller.
4435 */
0d5ff566
TH
4436void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4437 unsigned int buflen, int write_data)
75e99585
AC
4438{
4439 unsigned long flags;
4440 local_irq_save(flags);
0d5ff566 4441 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
4442 local_irq_restore(flags);
4443}
4444
4445
6ae4cfb5 4446/**
5a5dbd18 4447 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4448 * @qc: Command in progress
4449 *
5a5dbd18 4450 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4451 *
4452 * LOCKING:
4453 * Inherited from caller.
4454 */
4455
1da177e4
LT
4456static void ata_pio_sector(struct ata_queued_cmd *qc)
4457{
4458 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4459 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4460 struct ata_port *ap = qc->ap;
4461 struct page *page;
4462 unsigned int offset;
4463 unsigned char *buf;
4464
5a5dbd18 4465 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4466 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4467
4468 page = sg[qc->cursg].page;
726f0785 4469 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4470
4471 /* get the current page and offset */
4472 page = nth_page(page, (offset >> PAGE_SHIFT));
4473 offset %= PAGE_SIZE;
4474
1da177e4
LT
4475 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4476
91b8b313
AL
4477 if (PageHighMem(page)) {
4478 unsigned long flags;
4479
a6b2c5d4 4480 /* FIXME: use a bounce buffer */
91b8b313
AL
4481 local_irq_save(flags);
4482 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4483
91b8b313 4484 /* do the actual data transfer */
5a5dbd18 4485 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4486
91b8b313
AL
4487 kunmap_atomic(buf, KM_IRQ0);
4488 local_irq_restore(flags);
4489 } else {
4490 buf = page_address(page);
5a5dbd18 4491 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4492 }
1da177e4 4493
5a5dbd18
ML
4494 qc->curbytes += qc->sect_size;
4495 qc->cursg_ofs += qc->sect_size;
1da177e4 4496
726f0785 4497 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4498 qc->cursg++;
4499 qc->cursg_ofs = 0;
4500 }
1da177e4 4501}
1da177e4 4502
07f6f7d0 4503/**
5a5dbd18 4504 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4505 * @qc: Command in progress
4506 *
5a5dbd18 4507 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4508 * ATA device for the DRQ request.
4509 *
4510 * LOCKING:
4511 * Inherited from caller.
4512 */
1da177e4 4513
07f6f7d0
AL
4514static void ata_pio_sectors(struct ata_queued_cmd *qc)
4515{
4516 if (is_multi_taskfile(&qc->tf)) {
4517 /* READ/WRITE MULTIPLE */
4518 unsigned int nsect;
4519
587005de 4520 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4521
5a5dbd18 4522 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4523 qc->dev->multi_count);
07f6f7d0
AL
4524 while (nsect--)
4525 ata_pio_sector(qc);
4526 } else
4527 ata_pio_sector(qc);
4528}
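
/* Worked example (illustrative only) of the chunk size computation in
 * ata_pio_sectors(): with 512-byte sectors and multi_count 16, a DRQ
 * block moves min(bytes_left / 512, 16) sectors, i.e. 16 while plenty
 * of data remains and a short tail for the last chunk (e.g. 4 for the
 * final 2 KiB):
 */
static unsigned int example_multi_chunk(unsigned int bytes_left,
					unsigned int sect_size,
					unsigned int multi_count)
{
	return min(bytes_left / sect_size, multi_count);
}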
4529
c71c1857
AL
4530/**
4531 * atapi_send_cdb - Write CDB bytes to hardware
4532 * @ap: Port to which ATAPI device is attached.
4533 * @qc: Taskfile currently active
4534 *
4535 * When device has indicated its readiness to accept
4536 * a CDB, this function is called. Send the CDB.
4537 *
4538 * LOCKING:
4539 * Inherited from caller.
4540 */
4541
4542static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4543{
4544 /* send SCSI cdb */
4545 DPRINTK("send cdb\n");
db024d53 4546 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4547
a6b2c5d4 4548 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4549 ata_altstatus(ap); /* flush */
4550
4551 switch (qc->tf.protocol) {
4552 case ATA_PROT_ATAPI:
4553 ap->hsm_task_state = HSM_ST;
4554 break;
4555 case ATA_PROT_ATAPI_NODATA:
4556 ap->hsm_task_state = HSM_ST_LAST;
4557 break;
4558 case ATA_PROT_ATAPI_DMA:
4559 ap->hsm_task_state = HSM_ST_LAST;
4560 /* initiate bmdma */
4561 ap->ops->bmdma_start(qc);
4562 break;
4563 }
1da177e4
LT
4564}
4565
6ae4cfb5
AL
4566/**
4567 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4568 * @qc: Command in progress
4569 * @bytes: number of bytes
4570 *
4571 * Transfer data from/to the ATAPI device.
4572 *
4573 * LOCKING:
4574 * Inherited from caller.
4575 *
4576 */
4577
1da177e4
LT
4578static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4579{
4580 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4581 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4582 struct ata_port *ap = qc->ap;
4583 struct page *page;
4584 unsigned char *buf;
4585 unsigned int offset, count;
4586
563a6e1f 4587 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4588 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4589
4590next_sg:
563a6e1f 4591 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4592 /*
563a6e1f
AL
4593 * The end of qc->sg is reached and the device expects
4594 * more data to transfer. In order not to overrun qc->sg
4595 * while still honoring the length in the byte count register:
4596 * - for the read case, discard trailing data from the device
4597 * - for the write case, pad zero data out to the device
4598 */
4599 u16 pad_buf[1] = { 0 };
4600 unsigned int words = bytes >> 1;
4601 unsigned int i;
4602
4603 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4604 ata_dev_printk(qc->dev, KERN_WARNING,
4605 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4606
4607 for (i = 0; i < words; i++)
a6b2c5d4 4608 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4609
14be71f4 4610 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4611 return;
4612 }
4613
cedc9a47 4614 sg = &qc->__sg[qc->cursg];
1da177e4 4615
1da177e4
LT
4616 page = sg->page;
4617 offset = sg->offset + qc->cursg_ofs;
4618
4619 /* get the current page and offset */
4620 page = nth_page(page, (offset >> PAGE_SHIFT));
4621 offset %= PAGE_SIZE;
4622
6952df03 4623 /* don't overrun current sg */
32529e01 4624 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4625
4626 /* don't cross page boundaries */
4627 count = min(count, (unsigned int)PAGE_SIZE - offset);
4628
7282aa4b
AL
4629 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4630
91b8b313
AL
4631 if (PageHighMem(page)) {
4632 unsigned long flags;
4633
a6b2c5d4 4634 /* FIXME: use bounce buffer */
91b8b313
AL
4635 local_irq_save(flags);
4636 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4637
91b8b313 4638 /* do the actual data transfer */
a6b2c5d4 4639 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4640
91b8b313
AL
4641 kunmap_atomic(buf, KM_IRQ0);
4642 local_irq_restore(flags);
4643 } else {
4644 buf = page_address(page);
a6b2c5d4 4645 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4646 }
1da177e4
LT
4647
4648 bytes -= count;
4649 qc->curbytes += count;
4650 qc->cursg_ofs += count;
4651
32529e01 4652 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4653 qc->cursg++;
4654 qc->cursg_ofs = 0;
4655 }
4656
563a6e1f 4657 if (bytes)
1da177e4 4658 goto next_sg;
1da177e4
LT
4659}
4660
6ae4cfb5
AL
4661/**
4662 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4663 * @qc: Command in progress
4664 *
4665 * Transfer data from/to the ATAPI device.
4666 *
4667 * LOCKING:
4668 * Inherited from caller.
6ae4cfb5
AL
4669 */
4670
1da177e4
LT
4671static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4672{
4673 struct ata_port *ap = qc->ap;
4674 struct ata_device *dev = qc->dev;
4675 unsigned int ireason, bc_lo, bc_hi, bytes;
4676 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4677
eec4c3f3
AL
4678 /* Abuse qc->result_tf for temp storage of intermediate TF
4679 * here to save some kernel stack usage.
4680 * For normal completion, qc->result_tf is not relevant. For
4681 * error, qc->result_tf is later overwritten by ata_qc_complete().
4682 * So, the correctness of qc->result_tf is not affected.
4683 */
4684 ap->ops->tf_read(ap, &qc->result_tf);
4685 ireason = qc->result_tf.nsect;
4686 bc_lo = qc->result_tf.lbam;
4687 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4688 bytes = (bc_hi << 8) | bc_lo;
4689
4690 /* shall be cleared to zero, indicating xfer of data */
4691 if (ireason & (1 << 0))
4692 goto err_out;
4693
4694 /* make sure transfer direction matches expected */
4695 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4696 if (do_write != i_write)
4697 goto err_out;
4698
44877b4e 4699 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4700
1da177e4
LT
4701 __atapi_pio_bytes(qc, bytes);
4702
4703 return;
4704
4705err_out:
f15a1daf 4706 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4707 qc->err_mask |= AC_ERR_HSM;
14be71f4 4708 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4709}
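
/* Sketch of the ireason decoding used above (field layout per the
 * ATAPI spec): bit 0 (CoD) must be 0 for a data phase, and bit 1 (IO)
 * gives the direction as seen from the host, so a mismatch with what
 * the driver intended is an HSM violation. Illustrative helper only:
 */
static int example_check_ireason(u8 ireason, int do_write)
{
	int i_write;

	if (ireason & (1 << 0))		/* CoD set: not a data phase */
		return -EIO;
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	return (i_write == do_write) ? 0 : -EIO;	/* direction check */
}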
4710
4711/**
c234fb00
AL
4712 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4713 * @ap: the target ata_port
4714 * @qc: qc in progress
1da177e4 4715 *
c234fb00
AL
4716 * RETURNS:
4717 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4718 */
c234fb00
AL
4719
4720static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4721{
c234fb00
AL
4722 if (qc->tf.flags & ATA_TFLAG_POLLING)
4723 return 1;
1da177e4 4724
c234fb00
AL
4725 if (ap->hsm_task_state == HSM_ST_FIRST) {
4726 if (qc->tf.protocol == ATA_PROT_PIO &&
4727 (qc->tf.flags & ATA_TFLAG_WRITE))
4728 return 1;
1da177e4 4729
c234fb00
AL
4730 if (is_atapi_taskfile(&qc->tf) &&
4731 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4732 return 1;
fe79e683
AL
4733 }
4734
c234fb00
AL
4735 return 0;
4736}
1da177e4 4737
c17ea20d
TH
4738/**
4739 * ata_hsm_qc_complete - finish a qc running on standard HSM
4740 * @qc: Command to complete
4741 * @in_wq: 1 if called from workqueue, 0 otherwise
4742 *
4743 * Finish @qc which is running on standard HSM.
4744 *
4745 * LOCKING:
cca3974e 4746 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4747 * Otherwise, none on entry and grabs host lock.
4748 */
4749static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4750{
4751 struct ata_port *ap = qc->ap;
4752 unsigned long flags;
4753
4754 if (ap->ops->error_handler) {
4755 if (in_wq) {
ba6a1308 4756 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4757
cca3974e
JG
4758 /* EH might have kicked in while host lock is
4759 * released.
c17ea20d
TH
4760 */
4761 qc = ata_qc_from_tag(ap, qc->tag);
4762 if (qc) {
4763 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4764 ap->ops->irq_on(ap);
c17ea20d
TH
4765 ata_qc_complete(qc);
4766 } else
4767 ata_port_freeze(ap);
4768 }
4769
ba6a1308 4770 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4771 } else {
4772 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4773 ata_qc_complete(qc);
4774 else
4775 ata_port_freeze(ap);
4776 }
4777 } else {
4778 if (in_wq) {
ba6a1308 4779 spin_lock_irqsave(ap->lock, flags);
83625006 4780 ap->ops->irq_on(ap);
c17ea20d 4781 ata_qc_complete(qc);
ba6a1308 4782 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4783 } else
4784 ata_qc_complete(qc);
4785 }
1da177e4 4786
c81e29b4 4787 ata_altstatus(ap); /* flush */
c17ea20d
TH
4788}
4789
bb5cb290
AL
4790/**
4791 * ata_hsm_move - move the HSM to the next state.
4792 * @ap: the target ata_port
4793 * @qc: qc in progress
4794 * @status: current device status
4795 * @in_wq: 1 if called from workqueue, 0 otherwise
4796 *
4797 * RETURNS:
4798 * 1 when poll next status needed, 0 otherwise.
4799 */
9a1004d0
TH
4800int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4801 u8 status, int in_wq)
e2cec771 4802{
bb5cb290
AL
4803 unsigned long flags = 0;
4804 int poll_next;
4805
6912ccd5
AL
4806 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4807
bb5cb290
AL
4808 /* Make sure ata_qc_issue_prot() does not throw things
4809 * like DMA polling into the workqueue. Notice that
4810 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4811 */
c234fb00 4812 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4813
e2cec771 4814fsm_start:
999bb6f4 4815 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4816 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4817
e2cec771
AL
4818 switch (ap->hsm_task_state) {
4819 case HSM_ST_FIRST:
bb5cb290
AL
4820 /* Send first data block or PACKET CDB */
4821
4822 /* If polling, we will stay in the work queue after
4823 * sending the data. Otherwise, interrupt handler
4824 * takes over after sending the data.
4825 */
4826 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4827
e2cec771 4828 /* check device status */
3655d1d3
AL
4829 if (unlikely((status & ATA_DRQ) == 0)) {
4830 /* handle BSY=0, DRQ=0 as error */
4831 if (likely(status & (ATA_ERR | ATA_DF)))
4832 /* device stops HSM for abort/error */
4833 qc->err_mask |= AC_ERR_DEV;
4834 else
4835 /* HSM violation. Let EH handle this */
4836 qc->err_mask |= AC_ERR_HSM;
4837
14be71f4 4838 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4839 goto fsm_start;
1da177e4
LT
4840 }
4841
71601958
AL
4842 /* Device should not ask for data transfer (DRQ=1)
4843 * when it finds something wrong.
eee6c32f
AL
4844 * We ignore DRQ here and stop the HSM by
4845 * changing hsm_task_state to HSM_ST_ERR and
4846 * let the EH abort the command or reset the device.
71601958
AL
4847 */
4848 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4849 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4850 "error, dev_stat 0x%X\n", status);
3655d1d3 4851 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4852 ap->hsm_task_state = HSM_ST_ERR;
4853 goto fsm_start;
71601958 4854 }
1da177e4 4855
bb5cb290
AL
4856 /* Send the CDB (atapi) or the first data block (ata pio out).
4857 * During the state transition, interrupt handler shouldn't
4858 * be invoked before the data transfer is complete and
4859 * hsm_task_state is changed. Hence, the following locking.
4860 */
4861 if (in_wq)
ba6a1308 4862 spin_lock_irqsave(ap->lock, flags);
1da177e4 4863
bb5cb290
AL
4864 if (qc->tf.protocol == ATA_PROT_PIO) {
4865 /* PIO data out protocol.
4866 * send first data block.
4867 */
0565c26d 4868
bb5cb290
AL
4869 /* ata_pio_sectors() might change the state
4870 * to HSM_ST_LAST. so, the state is changed here
4871 * before ata_pio_sectors().
4872 */
4873 ap->hsm_task_state = HSM_ST;
4874 ata_pio_sectors(qc);
4875 ata_altstatus(ap); /* flush */
4876 } else
4877 /* send CDB */
4878 atapi_send_cdb(ap, qc);
4879
4880 if (in_wq)
ba6a1308 4881 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4882
4883 /* if polling, ata_pio_task() handles the rest.
4884 * otherwise, interrupt handler takes over from here.
4885 */
e2cec771 4886 break;
1c848984 4887
e2cec771
AL
4888 case HSM_ST:
4889 /* complete command or read/write the data register */
4890 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4891 /* ATAPI PIO protocol */
4892 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4893 /* No more data to transfer or device error.
4894 * Device error will be tagged in HSM_ST_LAST.
4895 */
e2cec771
AL
4896 ap->hsm_task_state = HSM_ST_LAST;
4897 goto fsm_start;
4898 }
1da177e4 4899
71601958
AL
4900 /* Device should not ask for data transfer (DRQ=1)
4901 * when it finds something wrong.
eee6c32f
AL
4902 * We ignore DRQ here and stop the HSM by
4903 * changing hsm_task_state to HSM_ST_ERR and
4904 * let the EH abort the command or reset the device.
71601958
AL
4905 */
4906 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4907 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4908 "device error, dev_stat 0x%X\n",
4909 status);
3655d1d3 4910 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4911 ap->hsm_task_state = HSM_ST_ERR;
4912 goto fsm_start;
71601958 4913 }
1da177e4 4914
e2cec771 4915 atapi_pio_bytes(qc);
7fb6ec28 4916
e2cec771
AL
4917 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4918 /* bad ireason reported by device */
4919 goto fsm_start;
1da177e4 4920
e2cec771
AL
4921 } else {
4922 /* ATA PIO protocol */
4923 if (unlikely((status & ATA_DRQ) == 0)) {
4924 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4925 if (likely(status & (ATA_ERR | ATA_DF)))
4926 /* device stops HSM for abort/error */
4927 qc->err_mask |= AC_ERR_DEV;
4928 else
55a8e2c8
TH
4929 /* HSM violation. Let EH handle this.
4930 * Phantom devices also trigger this
4931 * condition. Mark hint.
4932 */
4933 qc->err_mask |= AC_ERR_HSM |
4934 AC_ERR_NODEV_HINT;
3655d1d3 4935
e2cec771
AL
4936 ap->hsm_task_state = HSM_ST_ERR;
4937 goto fsm_start;
4938 }
1da177e4 4939
eee6c32f
AL
4940 /* For PIO reads, some devices may ask for
4941 * data transfer (DRQ=1) along with ERR=1.
4942 * We respect DRQ here and transfer one
4943 * block of junk data before changing the
4944 * hsm_task_state to HSM_ST_ERR.
4945 *
4946 * For PIO writes, ERR=1 DRQ=1 doesn't make
4947 * sense since the data block has been
4948 * transferred to the device.
71601958
AL
4949 */
4950 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4951 /* data might be corrupted */
4952 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4953
4954 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4955 ata_pio_sectors(qc);
4956 ata_altstatus(ap);
4957 status = ata_wait_idle(ap);
4958 }
4959
3655d1d3
AL
4960 if (status & (ATA_BUSY | ATA_DRQ))
4961 qc->err_mask |= AC_ERR_HSM;
4962
eee6c32f
AL
4963 /* ata_pio_sectors() might change the
4964 * state to HSM_ST_LAST. so, the state
4965 * is changed after ata_pio_sectors().
4966 */
4967 ap->hsm_task_state = HSM_ST_ERR;
4968 goto fsm_start;
71601958
AL
4969 }
4970
e2cec771
AL
4971 ata_pio_sectors(qc);
4972
4973 if (ap->hsm_task_state == HSM_ST_LAST &&
4974 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4975 /* all data read */
4976 ata_altstatus(ap);
52a32205 4977 status = ata_wait_idle(ap);
e2cec771
AL
4978 goto fsm_start;
4979 }
4980 }
4981
4982 ata_altstatus(ap); /* flush */
bb5cb290 4983 poll_next = 1;
1da177e4
LT
4984 break;
4985
14be71f4 4986 case HSM_ST_LAST:
6912ccd5
AL
4987 if (unlikely(!ata_ok(status))) {
4988 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4989 ap->hsm_task_state = HSM_ST_ERR;
4990 goto fsm_start;
4991 }
4992
4993 /* no more data to transfer */
4332a771 4994 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 4995 ap->print_id, qc->dev->devno, status);
e2cec771 4996
6912ccd5
AL
4997 WARN_ON(qc->err_mask);
4998
e2cec771 4999 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 5000
e2cec771 5001 /* complete taskfile transaction */
c17ea20d 5002 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5003
5004 poll_next = 0;
1da177e4
LT
5005 break;
5006
14be71f4 5007 case HSM_ST_ERR:
e2cec771
AL
5008 /* make sure qc->err_mask is available to
5009 * know what's wrong and recover
5010 */
5011 WARN_ON(qc->err_mask == 0);
5012
5013 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 5014
999bb6f4 5015 /* complete taskfile transaction */
c17ea20d 5016 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5017
5018 poll_next = 0;
e2cec771
AL
5019 break;
5020 default:
bb5cb290 5021 poll_next = 0;
6912ccd5 5022 BUG();
1da177e4
LT
5023 }
5024
bb5cb290 5025 return poll_next;
1da177e4
LT
5026}
5027
65f27f38 5028static void ata_pio_task(struct work_struct *work)
8061f5f0 5029{
65f27f38
DH
5030 struct ata_port *ap =
5031 container_of(work, struct ata_port, port_task.work);
5032 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5033 u8 status;
a1af3734 5034 int poll_next;
8061f5f0 5035
7fb6ec28 5036fsm_start:
a1af3734 5037 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5038
a1af3734
AL
5039 /*
5040 * This is purely heuristic. This is a fast path.
5041 * Sometimes when we enter, BSY will be cleared in
5042 * a chk-status or two. If not, the drive is probably seeking
5043 * or something. Snooze for a couple msecs, then
5044 * chk-status again. If still busy, queue delayed work.
5045 */
5046 status = ata_busy_wait(ap, ATA_BUSY, 5);
5047 if (status & ATA_BUSY) {
5048 msleep(2);
5049 status = ata_busy_wait(ap, ATA_BUSY, 10);
5050 if (status & ATA_BUSY) {
31ce6dae 5051 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5052 return;
5053 }
8061f5f0
TH
5054 }
5055
a1af3734
AL
5056 /* move the HSM */
5057 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5058
a1af3734
AL
5059 /* another command or interrupt handler
5060 * may be running at this point.
5061 */
5062 if (poll_next)
7fb6ec28 5063 goto fsm_start;
8061f5f0
TH
5064}
5065
1da177e4
LT
5066/**
5067 * ata_qc_new - Request an available ATA command, for queueing
5068 * @ap: Port associated with device @dev
5069 * @dev: Device from whom we request an available command structure
5070 *
5071 * LOCKING:
0cba632b 5072 * None.
1da177e4
LT
5073 */
5074
5075static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5076{
5077 struct ata_queued_cmd *qc = NULL;
5078 unsigned int i;
5079
e3180499 5080 /* no command while frozen */
b51e9e5d 5081 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5082 return NULL;
5083
2ab7db1f
TH
5084 /* the last tag is reserved for internal command. */
5085 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5086 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5087 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5088 break;
5089 }
5090
5091 if (qc)
5092 qc->tag = i;
5093
5094 return qc;
5095}
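
/* Sketch of the tag discipline above: qc_allocated is a bitmap indexed
 * by tag, claimed with an atomic test_and_set_bit() so no lock is
 * needed, and the last tag never leaves ata_qc_new() because it is
 * reserved for internal commands (cf. ata_tag_internal()):
 */
static int example_tag_is_reserved(unsigned int tag)
{
	return tag == ATA_MAX_QUEUE - 1;
}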
5096
5097/**
5098 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5099 * @dev: Device from whom we request an available command structure
5100 *
5101 * LOCKING:
0cba632b 5102 * None.
1da177e4
LT
5103 */
5104
3373efd8 5105struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5106{
3373efd8 5107 struct ata_port *ap = dev->ap;
1da177e4
LT
5108 struct ata_queued_cmd *qc;
5109
5110 qc = ata_qc_new(ap);
5111 if (qc) {
1da177e4
LT
5112 qc->scsicmd = NULL;
5113 qc->ap = ap;
5114 qc->dev = dev;
1da177e4 5115
2c13b7ce 5116 ata_qc_reinit(qc);
1da177e4
LT
5117 }
5118
5119 return qc;
5120}
5121
1da177e4
LT
5122/**
5123 * ata_qc_free - free unused ata_queued_cmd
5124 * @qc: Command to complete
5125 *
5126 * Designed to free unused ata_queued_cmd object
5127 * in case something prevents using it.
5128 *
5129 * LOCKING:
cca3974e 5130 * spin_lock_irqsave(host lock)
1da177e4
LT
5131 */
5132void ata_qc_free(struct ata_queued_cmd *qc)
5133{
4ba946e9
TH
5134 struct ata_port *ap = qc->ap;
5135 unsigned int tag;
5136
a4631474 5137 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5138
4ba946e9
TH
5139 qc->flags = 0;
5140 tag = qc->tag;
5141 if (likely(ata_tag_valid(tag))) {
4ba946e9 5142 qc->tag = ATA_TAG_POISON;
6cec4a39 5143 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5144 }
1da177e4
LT
5145}
5146
76014427 5147void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5148{
dedaf2b0
TH
5149 struct ata_port *ap = qc->ap;
5150
a4631474
TH
5151 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5152 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5153
5154 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5155 ata_sg_clean(qc);
5156
7401abf2 5157 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
5158 if (qc->tf.protocol == ATA_PROT_NCQ)
5159 ap->sactive &= ~(1 << qc->tag);
5160 else
5161 ap->active_tag = ATA_TAG_POISON;
7401abf2 5162
3f3791d3
AL
5163 /* atapi: mark qc as inactive to prevent the interrupt handler
5164 * from completing the command twice later, before the error handler
5165 * is called. (when rc != 0 and atapi request sense is needed)
5166 */
5167 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5168 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5169
1da177e4 5170 /* call completion callback */
77853bf2 5171 qc->complete_fn(qc);
1da177e4
LT
5172}
5173
39599a53
TH
5174static void fill_result_tf(struct ata_queued_cmd *qc)
5175{
5176 struct ata_port *ap = qc->ap;
5177
39599a53 5178 qc->result_tf.flags = qc->tf.flags;
4742d54f 5179 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5180}
5181
f686bcb8
TH
5182/**
5183 * ata_qc_complete - Complete an active ATA command
5184 * @qc: Command to complete
5185 * @err_mask: ATA Status register contents
5186 *
5187 * Indicate to the mid and upper layers that an ATA
5188 * command has completed, with either an ok or not-ok status.
5189 *
5190 * LOCKING:
cca3974e 5191 * spin_lock_irqsave(host lock)
f686bcb8
TH
5192 */
5193void ata_qc_complete(struct ata_queued_cmd *qc)
5194{
5195 struct ata_port *ap = qc->ap;
5196
5197 /* XXX: New EH and old EH use different mechanisms to
5198 * synchronize EH with regular execution path.
5199 *
5200 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5201 * Normal execution path is responsible for not accessing a
5202 * failed qc. libata core enforces the rule by returning NULL
5203 * from ata_qc_from_tag() for failed qcs.
5204 *
5205 * Old EH depends on ata_qc_complete() nullifying completion
5206 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5207 * not synchronize with interrupt handler. Only PIO task is
5208 * taken care of.
5209 */
5210 if (ap->ops->error_handler) {
b51e9e5d 5211 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5212
5213 if (unlikely(qc->err_mask))
5214 qc->flags |= ATA_QCFLAG_FAILED;
5215
5216 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5217 if (!ata_tag_internal(qc->tag)) {
5218 /* always fill result TF for failed qc */
39599a53 5219 fill_result_tf(qc);
f686bcb8
TH
5220 ata_qc_schedule_eh(qc);
5221 return;
5222 }
5223 }
5224
5225 /* read result TF if requested */
5226 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5227 fill_result_tf(qc);
f686bcb8
TH
5228
5229 __ata_qc_complete(qc);
5230 } else {
5231 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5232 return;
5233
5234 /* read result TF if failed or requested */
5235 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5236 fill_result_tf(qc);
f686bcb8
TH
5237
5238 __ata_qc_complete(qc);
5239 }
5240}
5241
dedaf2b0
TH
5242/**
5243 * ata_qc_complete_multiple - Complete multiple qcs successfully
5244 * @ap: port in question
5245 * @qc_active: new qc_active mask
5246 * @finish_qc: LLDD callback invoked before completing a qc
5247 *
5248 * Complete in-flight commands. This functions is meant to be
5249 * called from low-level driver's interrupt routine to complete
5250 * requests normally. ap->qc_active and @qc_active is compared
5251 * and commands are completed accordingly.
5252 *
5253 * LOCKING:
cca3974e 5254 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5255 *
5256 * RETURNS:
5257 * Number of completed commands on success, -errno otherwise.
5258 */
5259int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5260 void (*finish_qc)(struct ata_queued_cmd *))
5261{
5262 int nr_done = 0;
5263 u32 done_mask;
5264 int i;
5265
5266 done_mask = ap->qc_active ^ qc_active;
5267
5268 if (unlikely(done_mask & qc_active)) {
5269 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5270 "(%08x->%08x)\n", ap->qc_active, qc_active);
5271 return -EINVAL;
5272 }
5273
5274 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5275 struct ata_queued_cmd *qc;
5276
5277 if (!(done_mask & (1 << i)))
5278 continue;
5279
5280 if ((qc = ata_qc_from_tag(ap, i))) {
5281 if (finish_qc)
5282 finish_qc(qc);
5283 ata_qc_complete(qc);
5284 nr_done++;
5285 }
5286 }
5287
5288 return nr_done;
5289}
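
/* Worked example (illustrative only) of the mask arithmetic above: if
 * tags 0-2 were in flight (qc_active = 0x7) and the controller now
 * reports qc_active = 0x1, then done_mask = 0x7 ^ 0x1 = 0x6 and tags 1
 * and 2 get completed. A bit set in both done_mask and the new mask
 * would mean a tag turned active during completion, hence the -EINVAL:
 */
static u32 example_done_mask(u32 old_active, u32 new_active)
{
	return old_active ^ new_active;
}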
5290
1da177e4
LT
5291static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5292{
5293 struct ata_port *ap = qc->ap;
5294
5295 switch (qc->tf.protocol) {
3dc1d881 5296 case ATA_PROT_NCQ:
1da177e4
LT
5297 case ATA_PROT_DMA:
5298 case ATA_PROT_ATAPI_DMA:
5299 return 1;
5300
5301 case ATA_PROT_ATAPI:
5302 case ATA_PROT_PIO:
1da177e4
LT
5303 if (ap->flags & ATA_FLAG_PIO_DMA)
5304 return 1;
5305
5306 /* fall through */
5307
5308 default:
5309 return 0;
5310 }
5311
5312 /* never reached */
5313}
5314
5315/**
5316 * ata_qc_issue - issue taskfile to device
5317 * @qc: command to issue to device
5318 *
5319 * Prepare an ATA command to submission to device.
5320 * This includes mapping the data into a DMA-able
5321 * area, filling in the S/G table, and finally
5322 * writing the taskfile to hardware, starting the command.
5323 *
5324 * LOCKING:
cca3974e 5325 * spin_lock_irqsave(host lock)
1da177e4 5326 */
8e0e694a 5327void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5328{
5329 struct ata_port *ap = qc->ap;
5330
dedaf2b0
TH
5331 /* Make sure only one non-NCQ command is outstanding. The
5332 * check is skipped for old EH because it reuses active qc to
5333 * request ATAPI sense.
5334 */
5335 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5336
5337 if (qc->tf.protocol == ATA_PROT_NCQ) {
5338 WARN_ON(ap->sactive & (1 << qc->tag));
5339 ap->sactive |= 1 << qc->tag;
5340 } else {
5341 WARN_ON(ap->sactive);
5342 ap->active_tag = qc->tag;
5343 }
5344
e4a70e76 5345 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5346 ap->qc_active |= 1 << qc->tag;
e4a70e76 5347
1da177e4
LT
5348 if (ata_should_dma_map(qc)) {
5349 if (qc->flags & ATA_QCFLAG_SG) {
5350 if (ata_sg_setup(qc))
8e436af9 5351 goto sg_err;
1da177e4
LT
5352 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5353 if (ata_sg_setup_one(qc))
8e436af9 5354 goto sg_err;
1da177e4
LT
5355 }
5356 } else {
5357 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5358 }
5359
5360 ap->ops->qc_prep(qc);
5361
8e0e694a
TH
5362 qc->err_mask |= ap->ops->qc_issue(qc);
5363 if (unlikely(qc->err_mask))
5364 goto err;
5365 return;
1da177e4 5366
8e436af9
TH
5367sg_err:
5368 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5369 qc->err_mask |= AC_ERR_SYSTEM;
5370err:
5371 ata_qc_complete(qc);
1da177e4
LT
5372}
5373
5374/**
5375 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5376 * @qc: command to issue to device
5377 *
5378 * Using various libata functions and hooks, this function
5379 * starts an ATA command. ATA commands are grouped into
5380 * classes called "protocols", and issuing each type of protocol
5381 * is slightly different.
5382 *
0baab86b
EF
5383 * May be used as the qc_issue() entry in ata_port_operations.
5384 *
1da177e4 5385 * LOCKING:
cca3974e 5386 * spin_lock_irqsave(host lock)
1da177e4
LT
5387 *
5388 * RETURNS:
9a3d9eb0 5389 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5390 */
5391
9a3d9eb0 5392unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5393{
5394 struct ata_port *ap = qc->ap;
5395
e50362ec
AL
 5396	/* Use polling PIO if the LLD doesn't handle
 5397	 * interrupt-driven PIO and the ATAPI CDB interrupt.
5398 */
5399 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5400 switch (qc->tf.protocol) {
5401 case ATA_PROT_PIO:
e3472cbe 5402 case ATA_PROT_NODATA:
5403 case ATA_PROT_ATAPI:
5404 case ATA_PROT_ATAPI_NODATA:
5405 qc->tf.flags |= ATA_TFLAG_POLLING;
5406 break;
5407 case ATA_PROT_ATAPI_DMA:
5408 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5409 /* see ata_dma_blacklisted() */
5410 BUG();
5411 break;
5412 default:
5413 break;
5414 }
5415 }
5416
5417 /* Some controllers show flaky interrupt behavior after
5418 * setting xfer mode. Use polling instead.
5419 */
5420 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
5421 qc->tf.feature == SETFEATURES_XFER) &&
5422 (ap->flags & ATA_FLAG_SETXFER_POLLING))
5423 qc->tf.flags |= ATA_TFLAG_POLLING;
5424
312f7da2 5425 /* select the device */
5426 ata_dev_select(ap, qc->dev->devno, 1, 0);
5427
312f7da2 5428 /* start the command */
5429 switch (qc->tf.protocol) {
5430 case ATA_PROT_NODATA:
5431 if (qc->tf.flags & ATA_TFLAG_POLLING)
5432 ata_qc_set_polling(qc);
5433
e5338254 5434 ata_tf_to_host(ap, &qc->tf);
5435 ap->hsm_task_state = HSM_ST_LAST;
5436
5437 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5438 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5439
5440 break;
5441
5442 case ATA_PROT_DMA:
587005de 5443 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5444
5445 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5446 ap->ops->bmdma_setup(qc); /* set up bmdma */
5447 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5448 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5449 break;
5450
5451 case ATA_PROT_PIO:
5452 if (qc->tf.flags & ATA_TFLAG_POLLING)
5453 ata_qc_set_polling(qc);
1da177e4 5454
e5338254 5455 ata_tf_to_host(ap, &qc->tf);
312f7da2 5456
5457 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5458 /* PIO data out protocol */
5459 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5460 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5461
5462 /* always send first data block using
e27486db 5463 * the ata_pio_task() codepath.
54f00389 5464 */
312f7da2 5465 } else {
5466 /* PIO data in protocol */
5467 ap->hsm_task_state = HSM_ST;
5468
5469 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5470 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5471
5472 /* if polling, ata_pio_task() handles the rest.
5473 * otherwise, interrupt handler takes over from here.
5474 */
5475 }
5476
5477 break;
5478
1da177e4 5479 case ATA_PROT_ATAPI:
1da177e4 5480 case ATA_PROT_ATAPI_NODATA:
5481 if (qc->tf.flags & ATA_TFLAG_POLLING)
5482 ata_qc_set_polling(qc);
5483
e5338254 5484 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5485
5486 ap->hsm_task_state = HSM_ST_FIRST;
5487
5488 /* send cdb by polling if no cdb interrupt */
5489 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5490 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5491 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5492 break;
5493
5494 case ATA_PROT_ATAPI_DMA:
587005de 5495 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5496
5497 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5498 ap->ops->bmdma_setup(qc); /* set up bmdma */
5499 ap->hsm_task_state = HSM_ST_FIRST;
5500
5501 /* send cdb by polling if no cdb interrupt */
5502 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5503 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5504 break;
5505
5506 default:
5507 WARN_ON(1);
9a3d9eb0 5508 return AC_ERR_SYSTEM;
5509 }
5510
5511 return 0;
5512}
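
/* Illustrative sketch, not from this file: a conventional PIO/BMDMA LLD
 * normally hooks this routine directly as its qc_issue entry ("my_ops"
 * is a made-up name; the remaining taskfile/bmdma/EH hooks are omitted):
 *
 *	static const struct ata_port_operations my_ops = {
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *	};
 */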
5513
5514/**
5515 * ata_host_intr - Handle host interrupt for given (port, task)
5516 * @ap: Port on which interrupt arrived (possibly...)
5517 * @qc: Taskfile currently active in engine
5518 *
 5519 * Handle host interrupt for given queued command. Commands
 5520 * issued with ATA_TFLAG_POLLING set are not handled here; they
 5521 * are driven by polling with interrupts disabled (nIEN bit).
5522 *
5523 * LOCKING:
cca3974e 5524 * spin_lock_irqsave(host lock)
5525 *
5526 * RETURNS:
5527 * One if interrupt was handled, zero if not (shared irq).
5528 */
5529
5530inline unsigned int ata_host_intr (struct ata_port *ap,
5531 struct ata_queued_cmd *qc)
5532{
ea54763f 5533 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5534 u8 status, host_stat = 0;
1da177e4 5535
312f7da2 5536 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 5537 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5538
5539 /* Check whether we are expecting interrupt in this state */
5540 switch (ap->hsm_task_state) {
5541 case HSM_ST_FIRST:
 5542		/* Some pre-ATAPI-4 devices assert INTRQ
 5543		 * in this state when ready to receive the CDB.
5544 */
1da177e4 5545
 5546		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
 5547		 * The flag is set only for ATAPI devices, so there is
 5548		 * no need to check is_atapi_taskfile(&qc->tf) again.
5549 */
5550 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5551 goto idle_irq;
1da177e4 5552 break;
5553 case HSM_ST_LAST:
5554 if (qc->tf.protocol == ATA_PROT_DMA ||
5555 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5556 /* check status of DMA engine */
5557 host_stat = ap->ops->bmdma_status(ap);
5558 VPRINTK("ata%u: host_stat 0x%X\n",
5559 ap->print_id, host_stat);
5560
5561 /* if it's not our irq... */
5562 if (!(host_stat & ATA_DMA_INTR))
5563 goto idle_irq;
5564
5565 /* before we do anything else, clear DMA-Start bit */
5566 ap->ops->bmdma_stop(qc);
5567
5568 if (unlikely(host_stat & ATA_DMA_ERR)) {
 5569				/* error when transferring data to/from memory */
5570 qc->err_mask |= AC_ERR_HOST_BUS;
5571 ap->hsm_task_state = HSM_ST_ERR;
5572 }
5573 }
5574 break;
5575 case HSM_ST:
5576 break;
5577 default:
5578 goto idle_irq;
5579 }
5580
5581 /* check altstatus */
5582 status = ata_altstatus(ap);
5583 if (status & ATA_BUSY)
5584 goto idle_irq;
1da177e4 5585
5586 /* check main status, clearing INTRQ */
5587 status = ata_chk_status(ap);
5588 if (unlikely(status & ATA_BUSY))
5589 goto idle_irq;
1da177e4 5590
5591 /* ack bmdma irq events */
5592 ap->ops->irq_clear(ap);
1da177e4 5593
bb5cb290 5594 ata_hsm_move(ap, qc, status, 0);
5595
5596 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5597 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5598 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5599
5600 return 1; /* irq handled */
5601
5602idle_irq:
5603 ap->stats.idle_irq++;
5604
5605#ifdef ATA_IRQ_TRAP
5606 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5607 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5608 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5609 return 1;
5610 }
5611#endif
5612 return 0; /* irq not handled */
5613}
5614
5615/**
5616 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5617 * @irq: irq line (unused)
cca3974e 5618 * @dev_instance: pointer to our ata_host information structure
1da177e4 5619 *
5620 * Default interrupt handler for PCI IDE devices. Calls
5621 * ata_host_intr() for each port that is not disabled.
5622 *
1da177e4 5623 * LOCKING:
cca3974e 5624 * Obtains host lock during operation.
5625 *
5626 * RETURNS:
0cba632b 5627 * IRQ_NONE or IRQ_HANDLED.
5628 */
5629
7d12e780 5630irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5631{
cca3974e 5632 struct ata_host *host = dev_instance;
5633 unsigned int i;
5634 unsigned int handled = 0;
5635 unsigned long flags;
5636
5637 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5638 spin_lock_irqsave(&host->lock, flags);
1da177e4 5639
cca3974e 5640 for (i = 0; i < host->n_ports; i++) {
5641 struct ata_port *ap;
5642
cca3974e 5643 ap = host->ports[i];
c1389503 5644 if (ap &&
029f5468 5645 !(ap->flags & ATA_FLAG_DISABLED)) {
5646 struct ata_queued_cmd *qc;
5647
5648 qc = ata_qc_from_tag(ap, ap->active_tag);
312f7da2 5649 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5650 (qc->flags & ATA_QCFLAG_ACTIVE))
5651 handled |= ata_host_intr(ap, qc);
5652 }
5653 }
5654
cca3974e 5655 spin_unlock_irqrestore(&host->lock, flags);
5656
5657 return IRQ_RETVAL(handled);
5658}
5659
5660/**
5661 * sata_scr_valid - test whether SCRs are accessible
5662 * @ap: ATA port to test SCR accessibility for
5663 *
5664 * Test whether SCRs are accessible for @ap.
5665 *
5666 * LOCKING:
5667 * None.
5668 *
5669 * RETURNS:
5670 * 1 if SCRs are accessible, 0 otherwise.
5671 */
5672int sata_scr_valid(struct ata_port *ap)
5673{
5674 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5675}
5676
5677/**
5678 * sata_scr_read - read SCR register of the specified port
5679 * @ap: ATA port to read SCR for
5680 * @reg: SCR to read
5681 * @val: Place to store read value
5682 *
5683 * Read SCR register @reg of @ap into *@val. This function is
5684 * guaranteed to succeed if the cable type of the port is SATA
5685 * and the port implements ->scr_read.
5686 *
5687 * LOCKING:
5688 * None.
5689 *
5690 * RETURNS:
5691 * 0 on success, negative errno on failure.
5692 */
5693int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5694{
5695 if (sata_scr_valid(ap)) {
5696 *val = ap->ops->scr_read(ap, reg);
5697 return 0;
5698 }
5699 return -EOPNOTSUPP;
5700}
5701
5702/**
5703 * sata_scr_write - write SCR register of the specified port
5704 * @ap: ATA port to write SCR for
5705 * @reg: SCR to write
5706 * @val: value to write
5707 *
5708 * Write @val to SCR register @reg of @ap. This function is
5709 * guaranteed to succeed if the cable type of the port is SATA
 5710 * and the port implements ->scr_write.
5711 *
5712 * LOCKING:
5713 * None.
5714 *
5715 * RETURNS:
5716 * 0 on success, negative errno on failure.
5717 */
5718int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5719{
5720 if (sata_scr_valid(ap)) {
5721 ap->ops->scr_write(ap, reg, val);
5722 return 0;
5723 }
5724 return -EOPNOTSUPP;
5725}
5726
5727/**
5728 * sata_scr_write_flush - write SCR register of the specified port and flush
5729 * @ap: ATA port to write SCR for
5730 * @reg: SCR to write
5731 * @val: value to write
5732 *
 5733 * This function is identical to sata_scr_write() except that it
 5734 * flushes the write by reading the register back afterwards.
5735 *
5736 * LOCKING:
5737 * None.
5738 *
5739 * RETURNS:
5740 * 0 on success, negative errno on failure.
5741 */
5742int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5743{
5744 if (sata_scr_valid(ap)) {
5745 ap->ops->scr_write(ap, reg, val);
5746 ap->ops->scr_read(ap, reg);
5747 return 0;
5748 }
5749 return -EOPNOTSUPP;
5750}
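
/* Illustrative sketch: reading the negotiated link speed out of SStatus
 * with the accessors above (the printk is purely for demonstration):
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(ap, SCR_STATUS, &sstatus) == 0)
 *		printk(KERN_INFO "SPD field: %u\n", (sstatus >> 4) & 0xf);
 *
 * sata_scr_read() fails with -EOPNOTSUPP when the port has no SCR
 * access, so no separate sata_scr_valid() check is needed here.
 */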
5751
5752/**
5753 * ata_port_online - test whether the given port is online
5754 * @ap: ATA port to test
5755 *
5756 * Test whether @ap is online. Note that this function returns 0
5757 * if online status of @ap cannot be obtained, so
5758 * ata_port_online(ap) != !ata_port_offline(ap).
5759 *
5760 * LOCKING:
5761 * None.
5762 *
5763 * RETURNS:
5764 * 1 if the port online status is available and online.
5765 */
5766int ata_port_online(struct ata_port *ap)
5767{
5768 u32 sstatus;
5769
5770 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5771 return 1;
5772 return 0;
5773}
5774
5775/**
5776 * ata_port_offline - test whether the given port is offline
5777 * @ap: ATA port to test
5778 *
5779 * Test whether @ap is offline. Note that this function returns
5780 * 0 if offline status of @ap cannot be obtained, so
5781 * ata_port_online(ap) != !ata_port_offline(ap).
5782 *
5783 * LOCKING:
5784 * None.
5785 *
5786 * RETURNS:
5787 * 1 if the port offline status is available and offline.
5788 */
5789int ata_port_offline(struct ata_port *ap)
5790{
5791 u32 sstatus;
5792
5793 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5794 return 1;
5795 return 0;
5796}
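
/* Illustrative sketch: because either test returns 0 when the SCRs
 * cannot be read, "not online" is weaker than "offline"; callers pick
 * the test whose failure mode is safe for them:
 *
 *	if (ata_port_offline(ap))
 *		... link is known to be down ...
 *	else
 *		... link may be up, or its status is simply unknown ...
 */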
0baab86b 5797
77b08fb5 5798int ata_flush_cache(struct ata_device *dev)
9b847548 5799{
977e6b9f 5800 unsigned int err_mask;
5801 u8 cmd;
5802
5803 if (!ata_try_flush_cache(dev))
5804 return 0;
5805
6fc49adb 5806 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5807 cmd = ATA_CMD_FLUSH_EXT;
5808 else
5809 cmd = ATA_CMD_FLUSH;
5810
5811 err_mask = ata_do_simple_cmd(dev, cmd);
5812 if (err_mask) {
5813 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5814 return -EIO;
5815 }
5816
5817 return 0;
5818}
5819
6ffa01d8 5820#ifdef CONFIG_PM
5821static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5822 unsigned int action, unsigned int ehi_flags,
5823 int wait)
5824{
5825 unsigned long flags;
5826 int i, rc;
5827
5828 for (i = 0; i < host->n_ports; i++) {
5829 struct ata_port *ap = host->ports[i];
5830
5831 /* Previous resume operation might still be in
5832 * progress. Wait for PM_PENDING to clear.
5833 */
5834 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5835 ata_port_wait_eh(ap);
5836 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5837 }
5838
5839 /* request PM ops to EH */
5840 spin_lock_irqsave(ap->lock, flags);
5841
5842 ap->pm_mesg = mesg;
5843 if (wait) {
5844 rc = 0;
5845 ap->pm_result = &rc;
5846 }
5847
5848 ap->pflags |= ATA_PFLAG_PM_PENDING;
5849 ap->eh_info.action |= action;
5850 ap->eh_info.flags |= ehi_flags;
5851
5852 ata_port_schedule_eh(ap);
5853
5854 spin_unlock_irqrestore(ap->lock, flags);
5855
5856 /* wait and check result */
5857 if (wait) {
5858 ata_port_wait_eh(ap);
5859 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5860 if (rc)
5861 return rc;
5862 }
5863 }
5864
5865 return 0;
5866}
5867
5868/**
5869 * ata_host_suspend - suspend host
5870 * @host: host to suspend
5871 * @mesg: PM message
5872 *
cca3974e 5873 * Suspend @host. Actual operation is performed by EH. This
5874 * function requests EH to perform PM operations and waits for EH
5875 * to finish.
5876 *
5877 * LOCKING:
5878 * Kernel thread context (may sleep).
5879 *
5880 * RETURNS:
5881 * 0 on success, -errno on failure.
5882 */
cca3974e 5883int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 5884{
9666f400 5885 int rc;
500530f6 5886
cca3974e 5887 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5888 if (rc == 0)
5889 host->dev->power.power_state = mesg;
5890 return rc;
5891}
5892
5893/**
5894 * ata_host_resume - resume host
5895 * @host: host to resume
500530f6 5896 *
cca3974e 5897 * Resume @host. Actual operation is performed by EH. This
5898 * function requests EH to perform PM operations and returns.
 5899 * Note that all resume operations are performed in parallel.
5900 *
5901 * LOCKING:
5902 * Kernel thread context (may sleep).
5903 */
cca3974e 5904void ata_host_resume(struct ata_host *host)
500530f6 5905{
5906 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5907 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5908 host->dev->power.power_state = PMSG_ON;
500530f6 5909}
6ffa01d8 5910#endif
500530f6 5911
5912/**
5913 * ata_port_start - Set port up for dma.
5914 * @ap: Port to initialize
5915 *
5916 * Called just after data structures for each port are
5917 * initialized. Allocates space for PRD table.
5918 *
5919 * May be used as the port_start() entry in ata_port_operations.
5920 *
5921 * LOCKING:
5922 * Inherited from caller.
5923 */
f0d36efd 5924int ata_port_start(struct ata_port *ap)
1da177e4 5925{
2f1f610b 5926 struct device *dev = ap->dev;
6037d6bb 5927 int rc;
1da177e4 5928
5929 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5930 GFP_KERNEL);
5931 if (!ap->prd)
5932 return -ENOMEM;
5933
6037d6bb 5934 rc = ata_pad_alloc(ap, dev);
f0d36efd 5935 if (rc)
6037d6bb 5936 return rc;
1da177e4 5937
5938 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5939 (unsigned long long)ap->prd_dma);
5940 return 0;
5941}
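
/* Illustrative sketch ("my_port_start" and "struct my_pp" are made-up
 * names): an LLD that needs private per-port state typically chains to
 * ata_port_start() first, then adds its own devres-managed allocation:
 *
 *	static int my_port_start(struct ata_port *ap)
 *	{
 *		int rc = ata_port_start(ap);	(PRD table, pad buffer)
 *
 *		if (rc)
 *			return rc;
 *		ap->private_data = devm_kzalloc(ap->dev,
 *					sizeof(struct my_pp), GFP_KERNEL);
 *		return ap->private_data ? 0 : -ENOMEM;
 *	}
 */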
5942
5943/**
5944 * ata_dev_init - Initialize an ata_device structure
5945 * @dev: Device structure to initialize
5946 *
5947 * Initialize @dev in preparation for probing.
5948 *
5949 * LOCKING:
5950 * Inherited from caller.
5951 */
5952void ata_dev_init(struct ata_device *dev)
5953{
5954 struct ata_port *ap = dev->ap;
5955 unsigned long flags;
5956
5957 /* SATA spd limit is bound to the first device */
5958 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5959
5960 /* High bits of dev->flags are used to record warm plug
5961 * requests which occur asynchronously. Synchronize using
cca3974e 5962 * host lock.
72fa4b74 5963 */
ba6a1308 5964 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5965 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5966 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5967
5968 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5969 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5970 dev->pio_mask = UINT_MAX;
5971 dev->mwdma_mask = UINT_MAX;
5972 dev->udma_mask = UINT_MAX;
5973}
5974
1da177e4 5975/**
5976 * ata_port_alloc - allocate and initialize basic ATA port resources
5977 * @host: ATA host this allocated port belongs to
1da177e4 5978 *
5979 * Allocate and initialize basic ATA port resources.
5980 *
5981 * RETURNS:
5982 * Allocate ATA port on success, NULL on failure.
0cba632b 5983 *
1da177e4 5984 * LOCKING:
f3187195 5985 * Inherited from calling layer (may sleep).
1da177e4 5986 */
f3187195 5987struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5988{
f3187195 5989 struct ata_port *ap;
5990 unsigned int i;
5991
5992 DPRINTK("ENTER\n");
5993
5994 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5995 if (!ap)
5996 return NULL;
5997
f4d6d004 5998 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 5999 ap->lock = &host->lock;
198e0fed 6000 ap->flags = ATA_FLAG_DISABLED;
f3187195 6001 ap->print_id = -1;
1da177e4 6002 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6003 ap->host = host;
6004 ap->dev = host->dev;
6005
5a04bf4b 6006 ap->hw_sata_spd_limit = UINT_MAX;
6007 ap->active_tag = ATA_TAG_POISON;
6008 ap->last_ctl = 0xFF;
6009
6010#if defined(ATA_VERBOSE_DEBUG)
6011 /* turn on all debugging levels */
6012 ap->msg_enable = 0x00FF;
6013#elif defined(ATA_DEBUG)
6014 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6015#else
0dd4b21f 6016 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6017#endif
1da177e4 6018
6019 INIT_DELAYED_WORK(&ap->port_task, NULL);
6020 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6021 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6022 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6023 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 6024
838df628 6025 ap->cbl = ATA_CBL_NONE;
838df628 6026
6027 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6028 struct ata_device *dev = &ap->device[i];
38d87234 6029 dev->ap = ap;
72fa4b74 6030 dev->devno = i;
3ef3b43d 6031 ata_dev_init(dev);
acf356b1 6032 }
6033
6034#ifdef ATA_IRQ_TRAP
6035 ap->stats.unhandled_irq = 1;
6036 ap->stats.idle_irq = 1;
6037#endif
1da177e4 6038 return ap;
6039}
6040
6041static void ata_host_release(struct device *gendev, void *res)
6042{
6043 struct ata_host *host = dev_get_drvdata(gendev);
6044 int i;
6045
6046 for (i = 0; i < host->n_ports; i++) {
6047 struct ata_port *ap = host->ports[i];
6048
6049 if (!ap)
6050 continue;
6051
6052 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
f0d36efd 6053 ap->ops->port_stop(ap);
6054 }
6055
ecef7253 6056 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
f0d36efd 6057 host->ops->host_stop(host);
1aa56cca 6058
6059 for (i = 0; i < host->n_ports; i++) {
6060 struct ata_port *ap = host->ports[i];
6061
6062 if (!ap)
6063 continue;
6064
6065 if (ap->scsi_host)
6066 scsi_host_put(ap->scsi_host);
6067
4911487a 6068 kfree(ap);
6069 host->ports[i] = NULL;
6070 }
6071
1aa56cca 6072 dev_set_drvdata(gendev, NULL);
6073}
6074
6075/**
6076 * ata_host_alloc - allocate and init basic ATA host resources
6077 * @dev: generic device this host is associated with
6078 * @max_ports: maximum number of ATA ports associated with this host
6079 *
6080 * Allocate and initialize basic ATA host resources. LLD calls
6081 * this function to allocate a host, initializes it fully and
6082 * attaches it using ata_host_register().
6083 *
6084 * @max_ports ports are allocated and host->n_ports is
6085 * initialized to @max_ports. The caller is allowed to decrease
6086 * host->n_ports before calling ata_host_register(). The unused
6087 * ports will be automatically freed on registration.
6088 *
6089 * RETURNS:
6090 * Allocate ATA host on success, NULL on failure.
6091 *
6092 * LOCKING:
6093 * Inherited from calling layer (may sleep).
6094 */
6095struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6096{
6097 struct ata_host *host;
6098 size_t sz;
6099 int i;
6100
6101 DPRINTK("ENTER\n");
6102
6103 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6104 return NULL;
6105
6106 /* alloc a container for our list of ATA ports (buses) */
6107 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6109 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6110 if (!host)
6111 goto err_out;
6112
6113 devres_add(dev, host);
6114 dev_set_drvdata(dev, host);
6115
6116 spin_lock_init(&host->lock);
6117 host->dev = dev;
6118 host->n_ports = max_ports;
6119
6120 /* allocate ports bound to this host */
6121 for (i = 0; i < max_ports; i++) {
6122 struct ata_port *ap;
6123
6124 ap = ata_port_alloc(host);
6125 if (!ap)
6126 goto err_out;
6127
6128 ap->port_no = i;
6129 host->ports[i] = ap;
6130 }
6131
6132 devres_remove_group(dev, NULL);
6133 return host;
6134
6135 err_out:
6136 devres_release_group(dev, NULL);
6137 return NULL;
6138}
6139
6140/**
6141 * ata_host_alloc_pinfo - alloc host and init with port_info array
6142 * @dev: generic device this host is associated with
6143 * @ppi: array of ATA port_info to initialize host with
6144 * @n_ports: number of ATA ports attached to this host
6145 *
6146 * Allocate ATA host and initialize with info from @ppi. If NULL
6147 * terminated, @ppi may contain fewer entries than @n_ports. The
6148 * last entry will be used for the remaining ports.
6149 *
6150 * RETURNS:
6151 * Allocate ATA host on success, NULL on failure.
6152 *
6153 * LOCKING:
6154 * Inherited from calling layer (may sleep).
6155 */
6156struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6157 const struct ata_port_info * const * ppi,
6158 int n_ports)
6159{
6160 const struct ata_port_info *pi;
6161 struct ata_host *host;
6162 int i, j;
6163
6164 host = ata_host_alloc(dev, n_ports);
6165 if (!host)
6166 return NULL;
6167
6168 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6169 struct ata_port *ap = host->ports[i];
6170
6171 if (ppi[j])
6172 pi = ppi[j++];
6173
6174 ap->pio_mask = pi->pio_mask;
6175 ap->mwdma_mask = pi->mwdma_mask;
6176 ap->udma_mask = pi->udma_mask;
6177 ap->flags |= pi->flags;
6178 ap->ops = pi->port_ops;
6179
6180 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6181 host->ops = pi->port_ops;
6182 if (!host->private_data && pi->private_data)
6183 host->private_data = pi->private_data;
6184 }
6185
6186 return host;
6187}
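
/* Illustrative sketch (names made up): since the last @ppi entry is
 * reused for the remaining ports, a host with two identical ports needs
 * only one real entry plus a NULL terminator:
 *
 *	static const struct ata_port_info my_port_info = { ... };
 *
 *	const struct ata_port_info *ppi[] = { &my_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(dev, ppi, 2);
 */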
6188
6189/**
6190 * ata_host_start - start and freeze ports of an ATA host
6191 * @host: ATA host to start ports for
6192 *
6193 * Start and then freeze ports of @host. Started status is
6194 * recorded in host->flags, so this function can be called
6195 * multiple times. Ports are guaranteed to get started only
 6196 * once. If host->ops isn't initialized yet, it's set to the
6197 * first non-dummy port ops.
6198 *
6199 * LOCKING:
6200 * Inherited from calling layer (may sleep).
6201 *
6202 * RETURNS:
6203 * 0 if all ports are started successfully, -errno otherwise.
6204 */
6205int ata_host_start(struct ata_host *host)
6206{
6207 int i, rc;
6208
6209 if (host->flags & ATA_HOST_STARTED)
6210 return 0;
6211
6212 for (i = 0; i < host->n_ports; i++) {
6213 struct ata_port *ap = host->ports[i];
6214
6215 if (!host->ops && !ata_port_is_dummy(ap))
6216 host->ops = ap->ops;
6217
6218 if (ap->ops->port_start) {
6219 rc = ap->ops->port_start(ap);
6220 if (rc) {
6221 ata_port_printk(ap, KERN_ERR, "failed to "
6222 "start port (errno=%d)\n", rc);
6223 goto err_out;
6224 }
6225 }
6226
6227 ata_eh_freeze_port(ap);
6228 }
6229
6230 host->flags |= ATA_HOST_STARTED;
6231 return 0;
6232
6233 err_out:
6234 while (--i >= 0) {
6235 struct ata_port *ap = host->ports[i];
6236
6237 if (ap->ops->port_stop)
6238 ap->ops->port_stop(ap);
6239 }
6240 return rc;
6241}
6242
b03732f0 6243/**
6244 * ata_sas_host_init - Initialize a host struct
6245 * @host: host to initialize
6246 * @dev: device host is attached to
6247 * @flags: host flags
6248 * @ops: port_ops
6249 *
6250 * LOCKING:
6251 * PCI/etc. bus probe sem.
6252 *
6253 */
f3187195 6254/* KILLME - the only user left is ipr */
6255void ata_host_init(struct ata_host *host, struct device *dev,
6256 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6257{
6258 spin_lock_init(&host->lock);
6259 host->dev = dev;
6260 host->flags = flags;
6261 host->ops = ops;
6262}
6263
6264/**
6265 * ata_host_register - register initialized ATA host
6266 * @host: ATA host to register
6267 * @sht: template for SCSI host
6268 *
6269 * Register initialized ATA host. @host is allocated using
6270 * ata_host_alloc() and fully initialized by LLD. This function
6271 * starts ports, registers @host with ATA and SCSI layers and
 6272 * probes registered devices.
6273 *
6274 * LOCKING:
6275 * Inherited from calling layer (may sleep).
6276 *
6277 * RETURNS:
6278 * 0 on success, -errno otherwise.
6279 */
6280int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6281{
6282 int i, rc;
6283
6284 /* host must have been started */
6285 if (!(host->flags & ATA_HOST_STARTED)) {
6286 dev_printk(KERN_ERR, host->dev,
6287 "BUG: trying to register unstarted host\n");
6288 WARN_ON(1);
6289 return -EINVAL;
6290 }
6291
6292 /* Blow away unused ports. This happens when LLD can't
6293 * determine the exact number of ports to allocate at
6294 * allocation time.
6295 */
6296 for (i = host->n_ports; host->ports[i]; i++)
6297 kfree(host->ports[i]);
6298
6299 /* give ports names and add SCSI hosts */
6300 for (i = 0; i < host->n_ports; i++)
6301 host->ports[i]->print_id = ata_print_id++;
6302
6303 rc = ata_scsi_add_hosts(host, sht);
6304 if (rc)
6305 return rc;
6306
6307 /* set cable, sata_spd_limit and report */
6308 for (i = 0; i < host->n_ports; i++) {
6309 struct ata_port *ap = host->ports[i];
6310 int irq_line;
6311 u32 scontrol;
6312 unsigned long xfer_mask;
6313
6314 /* set SATA cable type if still unset */
6315 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6316 ap->cbl = ATA_CBL_SATA;
6317
6318 /* init sata_spd_limit to the current value */
6319 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6320 int spd = (scontrol >> 4) & 0xf;
6321 ap->hw_sata_spd_limit &= (1 << spd) - 1;
6322 }
6323 ap->sata_spd_limit = ap->hw_sata_spd_limit;
6324
6325 /* report the secondary IRQ for second channel legacy */
6326 irq_line = host->irq;
6327 if (i == 1 && host->irq2)
6328 irq_line = host->irq2;
6329
6330 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6331 ap->udma_mask);
6332
6333 /* print per-port info to dmesg */
6334 if (!ata_port_is_dummy(ap))
6335 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6336 "ctl 0x%p bmdma 0x%p irq %d\n",
6337 ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
6338 ata_mode_string(xfer_mask),
6339 ap->ioaddr.cmd_addr,
6340 ap->ioaddr.ctl_addr,
6341 ap->ioaddr.bmdma_addr,
6342 irq_line);
6343 else
6344 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6345 }
6346
6347 /* perform each probe synchronously */
6348 DPRINTK("probe begin\n");
6349 for (i = 0; i < host->n_ports; i++) {
6350 struct ata_port *ap = host->ports[i];
6351 int rc;
6352
6353 /* probe */
6354 if (ap->ops->error_handler) {
6355 struct ata_eh_info *ehi = &ap->eh_info;
6356 unsigned long flags;
6357
6358 ata_port_probe(ap);
6359
6360 /* kick EH for boot probing */
6361 spin_lock_irqsave(ap->lock, flags);
6362
6363 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6364 ehi->action |= ATA_EH_SOFTRESET;
6365 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6366
f4d6d004 6367 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6368 ap->pflags |= ATA_PFLAG_LOADING;
6369 ata_port_schedule_eh(ap);
6370
6371 spin_unlock_irqrestore(ap->lock, flags);
6372
6373 /* wait for EH to finish */
6374 ata_port_wait_eh(ap);
6375 } else {
6376 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6377 rc = ata_bus_probe(ap);
6378 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6379
6380 if (rc) {
6381 /* FIXME: do something useful here?
6382 * Current libata behavior will
6383 * tear down everything when
6384 * the module is removed
6385 * or the h/w is unplugged.
6386 */
6387 }
6388 }
6389 }
6390
6391 /* probes are done, now scan each port's disk(s) */
6392 DPRINTK("host probe begin\n");
6393 for (i = 0; i < host->n_ports; i++) {
6394 struct ata_port *ap = host->ports[i];
6395
6396 ata_scsi_scan_host(ap);
6397 }
6398
6399 return 0;
6400}
6401
6402/**
6403 * ata_host_activate - start host, request IRQ and register it
6404 * @host: target ATA host
6405 * @irq: IRQ to request
6406 * @irq_handler: irq_handler used when requesting IRQ
6407 * @irq_flags: irq_flags used when requesting IRQ
6408 * @sht: scsi_host_template to use when registering the host
6409 *
6410 * After allocating an ATA host and initializing it, most libata
6411 * LLDs perform three steps to activate the host - start host,
 6412 * request IRQ and register it. This helper takes the necessary
6413 * arguments and performs the three steps in one go.
6414 *
6415 * LOCKING:
6416 * Inherited from calling layer (may sleep).
6417 *
6418 * RETURNS:
6419 * 0 on success, -errno otherwise.
6420 */
6421int ata_host_activate(struct ata_host *host, int irq,
6422 irq_handler_t irq_handler, unsigned long irq_flags,
6423 struct scsi_host_template *sht)
6424{
6425 int rc;
6426
6427 rc = ata_host_start(host);
6428 if (rc)
6429 return rc;
6430
6431 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6432 dev_driver_string(host->dev), host);
6433 if (rc)
6434 return rc;
6435
6436 rc = ata_host_register(host, sht);
6437 /* if failed, just free the IRQ and leave ports alone */
6438 if (rc)
6439 devm_free_irq(host->dev, irq, host);
6440
6441 return rc;
6442}
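
/* Illustrative sketch ("my_sht" and the ioaddr setup are made up): a
 * minimal PCI LLD probe path built from the helpers above:
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *	if (!host)
 *		return -ENOMEM;
 *	... fill in host->ports[i]->ioaddr from the device's BARs ...
 *	return ata_host_activate(host, pdev->irq, ata_interrupt,
 *				 IRQF_SHARED, &my_sht);
 */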
6443
6444/**
 6445 * ata_port_detach - Detach ATA port in preparation for device removal
6446 * @ap: ATA port to be detached
6447 *
6448 * Detach all ATA devices and the associated SCSI devices of @ap;
6449 * then, remove the associated SCSI host. @ap is guaranteed to
6450 * be quiescent on return from this function.
6451 *
6452 * LOCKING:
6453 * Kernel thread context (may sleep).
6454 */
6455void ata_port_detach(struct ata_port *ap)
6456{
6457 unsigned long flags;
6458 int i;
6459
6460 if (!ap->ops->error_handler)
c3cf30a9 6461 goto skip_eh;
6462
6463 /* tell EH we're leaving & flush EH */
ba6a1308 6464 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 6465 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 6466 spin_unlock_irqrestore(ap->lock, flags);
6467
6468 ata_port_wait_eh(ap);
6469
6470 /* EH is now guaranteed to see UNLOADING, so no new device
6471 * will be attached. Disable all existing devices.
6472 */
ba6a1308 6473 spin_lock_irqsave(ap->lock, flags);
6474
6475 for (i = 0; i < ATA_MAX_DEVICES; i++)
6476 ata_dev_disable(&ap->device[i]);
6477
ba6a1308 6478 spin_unlock_irqrestore(ap->lock, flags);
6479
6480 /* Final freeze & EH. All in-flight commands are aborted. EH
 6481	 * will be skipped and retries will be terminated with bad
6482 * target.
6483 */
ba6a1308 6484 spin_lock_irqsave(ap->lock, flags);
720ba126 6485 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 6486 spin_unlock_irqrestore(ap->lock, flags);
6487
6488 ata_port_wait_eh(ap);
6489
6490 /* Flush hotplug task. The sequence is similar to
6491 * ata_port_flush_task().
6492 */
28e53bdd 6493 cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
720ba126 6494 cancel_delayed_work(&ap->hotplug_task);
28e53bdd 6495 cancel_work_sync(&ap->hotplug_task.work);
720ba126 6496
c3cf30a9 6497 skip_eh:
720ba126 6498 /* remove the associated SCSI host */
cca3974e 6499 scsi_remove_host(ap->scsi_host);
6500}
6501
6502/**
6503 * ata_host_detach - Detach all ports of an ATA host
6504 * @host: Host to detach
6505 *
6506 * Detach all ports of @host.
6507 *
6508 * LOCKING:
6509 * Kernel thread context (may sleep).
6510 */
6511void ata_host_detach(struct ata_host *host)
6512{
6513 int i;
6514
6515 for (i = 0; i < host->n_ports; i++)
6516 ata_port_detach(host->ports[i]);
6517}
6518
6519/**
6520 * ata_std_ports - initialize ioaddr with standard port offsets.
6521 * @ioaddr: IO address structure to be initialized
6522 *
6523 * Utility function which initializes data_addr, error_addr,
6524 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6525 * device_addr, status_addr, and command_addr to standard offsets
6526 * relative to cmd_addr.
6527 *
6528 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6529 */
0baab86b 6530
6531void ata_std_ports(struct ata_ioports *ioaddr)
6532{
6533 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6534 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6535 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6536 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6537 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6538 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6539 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6540 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6541 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6542 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6543}
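
/* Illustrative sketch: the LLD supplies cmd_addr (and usually ctl_addr)
 * itself before calling ata_std_ports(); "base" is a made-up iomapped
 * pointer and the 0x206 control offset merely follows the legacy layout:
 *
 *	ap->ioaddr.cmd_addr = base;
 *	ap->ioaddr.altstatus_addr =
 *	ap->ioaddr.ctl_addr = base + 0x206;
 *	ata_std_ports(&ap->ioaddr);
 */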
6544
0baab86b 6545
6546#ifdef CONFIG_PCI
6547
6548/**
6549 * ata_pci_remove_one - PCI layer callback for device removal
6550 * @pdev: PCI device that was removed
6551 *
 6552 * PCI layer indicates to libata via this hook that a hot-unplug or
 6553 * module unload event has occurred. Detach all ports. Resource
6554 * release is handled via devres.
6555 *
6556 * LOCKING:
6557 * Inherited from PCI layer (may sleep).
6558 */
f0d36efd 6559void ata_pci_remove_one(struct pci_dev *pdev)
6560{
6561 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6562 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6563
b878ca5d 6564 ata_host_detach(host);
6565}
6566
6567/* move to PCI subsystem */
057ace5e 6568int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6569{
6570 unsigned long tmp = 0;
6571
6572 switch (bits->width) {
6573 case 1: {
6574 u8 tmp8 = 0;
6575 pci_read_config_byte(pdev, bits->reg, &tmp8);
6576 tmp = tmp8;
6577 break;
6578 }
6579 case 2: {
6580 u16 tmp16 = 0;
6581 pci_read_config_word(pdev, bits->reg, &tmp16);
6582 tmp = tmp16;
6583 break;
6584 }
6585 case 4: {
6586 u32 tmp32 = 0;
6587 pci_read_config_dword(pdev, bits->reg, &tmp32);
6588 tmp = tmp32;
6589 break;
6590 }
6591
6592 default:
6593 return -EINVAL;
6594 }
6595
6596 tmp &= bits->mask;
6597
6598 return (tmp == bits->val) ? 1 : 0;
6599}
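
/* Illustrative sketch: callers describe the test as a struct pci_bits
 * (config register offset, access width in bytes, mask, expected
 * value); the offset and bit below are made up:
 *
 *	static const struct pci_bits my_enable_bits = {
 *		0x41, 1, 0x80, 0x80
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &my_enable_bits))
 *		return -ENOENT;	(channel disabled by firmware)
 */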
9b847548 6600
6ffa01d8 6601#ifdef CONFIG_PM
3c5100c1 6602void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6603{
6604 pci_save_state(pdev);
4c90d971 6605 pci_disable_device(pdev);
500530f6 6606
4c90d971 6607 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6608 pci_set_power_state(pdev, PCI_D3hot);
6609}
6610
553c4aa6 6611int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6612{
6613 int rc;
6614
6615 pci_set_power_state(pdev, PCI_D0);
6616 pci_restore_state(pdev);
553c4aa6 6617
b878ca5d 6618 rc = pcim_enable_device(pdev);
6619 if (rc) {
6620 dev_printk(KERN_ERR, &pdev->dev,
6621 "failed to enable device after resume (%d)\n", rc);
6622 return rc;
6623 }
6624
9b847548 6625 pci_set_master(pdev);
553c4aa6 6626 return 0;
6627}
6628
3c5100c1 6629int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6630{
cca3974e 6631 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6632 int rc = 0;
6633
cca3974e 6634 rc = ata_host_suspend(host, mesg);
6635 if (rc)
6636 return rc;
6637
3c5100c1 6638 ata_pci_device_do_suspend(pdev, mesg);
6639
6640 return 0;
6641}
6642
6643int ata_pci_device_resume(struct pci_dev *pdev)
6644{
cca3974e 6645 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6646 int rc;
500530f6 6647
6648 rc = ata_pci_device_do_resume(pdev);
6649 if (rc == 0)
6650 ata_host_resume(host);
6651 return rc;
9b847548 6652}
6653#endif /* CONFIG_PM */
6654
6655#endif /* CONFIG_PCI */
6656
6657
6658static int __init ata_init(void)
6659{
a8601e5f 6660 ata_probe_timeout *= HZ;
6661 ata_wq = create_workqueue("ata");
6662 if (!ata_wq)
6663 return -ENOMEM;
6664
6665 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6666 if (!ata_aux_wq) {
6667 destroy_workqueue(ata_wq);
6668 return -ENOMEM;
6669 }
6670
6671 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6672 return 0;
6673}
6674
6675static void __exit ata_exit(void)
6676{
6677 destroy_workqueue(ata_wq);
453b07ac 6678 destroy_workqueue(ata_aux_wq);
6679}
6680
a4625085 6681subsys_initcall(ata_init);
6682module_exit(ata_exit);
6683
67846b30 6684static unsigned long ratelimit_time;
34af946a 6685static DEFINE_SPINLOCK(ata_ratelimit_lock);
6686
6687int ata_ratelimit(void)
6688{
6689 int rc;
6690 unsigned long flags;
6691
6692 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6693
6694 if (time_after(jiffies, ratelimit_time)) {
6695 rc = 1;
6696 ratelimit_time = jiffies + (HZ/5);
6697 } else
6698 rc = 0;
6699
6700 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6701
6702 return rc;
6703}
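
/* Illustrative sketch: interrupt-path diagnostics are typically gated
 * on ata_ratelimit() so an interrupt storm cannot flood the log (at
 * most one message per HZ/5 window):
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
 */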
6704
6705/**
6706 * ata_wait_register - wait until register value changes
6707 * @reg: IO-mapped register
6708 * @mask: Mask to apply to read register value
6709 * @val: Wait condition
6710 * @interval_msec: polling interval in milliseconds
6711 * @timeout_msec: timeout in milliseconds
6712 *
6713 * Waiting for some bits of register to change is a common
6714 * operation for ATA controllers. This function reads 32bit LE
6715 * IO-mapped register @reg and tests for the following condition.
6716 *
6717 * (*@reg & mask) != val
6718 *
6719 * If the condition is met, it returns; otherwise, the process is
6720 * repeated after @interval_msec until timeout.
6721 *
6722 * LOCKING:
6723 * Kernel thread context (may sleep)
6724 *
6725 * RETURNS:
6726 * The final register value.
6727 */
6728u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6729 unsigned long interval_msec,
6730 unsigned long timeout_msec)
6731{
6732 unsigned long timeout;
6733 u32 tmp;
6734
6735 tmp = ioread32(reg);
6736
6737 /* Calculate timeout _after_ the first read to make sure
6738 * preceding writes reach the controller before starting to
6739 * eat away the timeout.
6740 */
6741 timeout = jiffies + (timeout_msec * HZ) / 1000;
6742
6743 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6744 msleep(interval_msec);
6745 tmp = ioread32(reg);
6746 }
6747
6748 return tmp;
6749}
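
/* Illustrative sketch (MY_STAT and MY_READY are made-up controller
 * definitions): wait up to a second, polling every 10 ms, for a ready
 * bit to come up; the loop runs while (reg & mask) == val:
 *
 *	u32 tmp = ata_wait_register(mmio + MY_STAT, MY_READY, 0, 10, 1000);
 *
 *	if (!(tmp & MY_READY))
 *		return -EBUSY;	(timed out, bit never set)
 */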
6750
6751/*
6752 * Dummy port_ops
6753 */
6754static void ata_dummy_noret(struct ata_port *ap) { }
6755static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6756static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6757
6758static u8 ata_dummy_check_status(struct ata_port *ap)
6759{
6760 return ATA_DRDY;
6761}
6762
6763static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6764{
6765 return AC_ERR_SYSTEM;
6766}
6767
6768const struct ata_port_operations ata_dummy_port_ops = {
6769 .port_disable = ata_port_disable,
6770 .check_status = ata_dummy_check_status,
6771 .check_altstatus = ata_dummy_check_status,
6772 .dev_select = ata_noop_dev_select,
6773 .qc_prep = ata_noop_qc_prep,
6774 .qc_issue = ata_dummy_qc_issue,
6775 .freeze = ata_dummy_noret,
6776 .thaw = ata_dummy_noret,
6777 .error_handler = ata_dummy_noret,
6778 .post_internal_cmd = ata_dummy_qc_noret,
6779 .irq_clear = ata_dummy_noret,
6780 .port_start = ata_dummy_ret0,
6781 .port_stop = ata_dummy_noret,
6782};
6783
6784const struct ata_port_info ata_dummy_port_info = {
6785 .port_ops = &ata_dummy_port_ops,
6786};
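
/* Illustrative sketch: an LLD reports a channel that is disabled or not
 * wired up by pointing the corresponding port_info slot at the dummy:
 *
 *	ppi[1] = &ata_dummy_port_info;	(second channel absent)
 *
 * The port is then registered as a dummy and announced as "DUMMY" by
 * ata_host_register().
 */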
6787
6788/*
6789 * libata is essentially a library of internal helper functions for
6790 * low-level ATA host controller drivers. As such, the API/ABI is
6791 * likely to change as new drivers are added and updated.
6792 * Do not depend on ABI/API stability.
6793 */
6794
6795EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6796EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6797EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6798EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6799EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6800EXPORT_SYMBOL_GPL(ata_std_bios_param);
6801EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6802EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6803EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6804EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 6805EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6806EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6807EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6808EXPORT_SYMBOL_GPL(ata_host_detach);
6809EXPORT_SYMBOL_GPL(ata_sg_init);
6810EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6811EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6812EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6813EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6814EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6815EXPORT_SYMBOL_GPL(ata_tf_load);
6816EXPORT_SYMBOL_GPL(ata_tf_read);
6817EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6818EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 6819EXPORT_SYMBOL_GPL(sata_print_link_status);
6820EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6821EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6822EXPORT_SYMBOL_GPL(ata_check_status);
6823EXPORT_SYMBOL_GPL(ata_altstatus);
6824EXPORT_SYMBOL_GPL(ata_exec_command);
6825EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6826EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 6827EXPORT_SYMBOL_GPL(ata_do_set_mode);
6828EXPORT_SYMBOL_GPL(ata_data_xfer);
6829EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6830EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6831EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6832EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6833EXPORT_SYMBOL_GPL(ata_bmdma_start);
6834EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6835EXPORT_SYMBOL_GPL(ata_bmdma_status);
6836EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6837EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6838EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6839EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6840EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6841EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6842EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6843EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6844EXPORT_SYMBOL_GPL(sata_set_spd);
6845EXPORT_SYMBOL_GPL(sata_phy_debounce);
6846EXPORT_SYMBOL_GPL(sata_phy_resume);
6847EXPORT_SYMBOL_GPL(sata_phy_reset);
6848EXPORT_SYMBOL_GPL(__sata_phy_reset);
6849EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6850EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6851EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6852EXPORT_SYMBOL_GPL(sata_port_hardreset);
6853EXPORT_SYMBOL_GPL(sata_std_hardreset);
6854EXPORT_SYMBOL_GPL(ata_std_postreset);
6855EXPORT_SYMBOL_GPL(ata_dev_classify);
6856EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6857EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6858EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6859EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6860EXPORT_SYMBOL_GPL(ata_busy_sleep);
d4b2bab4 6861EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 6862EXPORT_SYMBOL_GPL(ata_port_queue_task);
6863EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6864EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6865EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6866EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6867EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6868EXPORT_SYMBOL_GPL(ata_host_intr);
6869EXPORT_SYMBOL_GPL(sata_scr_valid);
6870EXPORT_SYMBOL_GPL(sata_scr_read);
6871EXPORT_SYMBOL_GPL(sata_scr_write);
6872EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6873EXPORT_SYMBOL_GPL(ata_port_online);
6874EXPORT_SYMBOL_GPL(ata_port_offline);
6ffa01d8 6875#ifdef CONFIG_PM
6876EXPORT_SYMBOL_GPL(ata_host_suspend);
6877EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6878#endif /* CONFIG_PM */
6879EXPORT_SYMBOL_GPL(ata_id_string);
6880EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 6881EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6919a0a6 6882EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6883EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6884
1bc4ccff 6885EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6886EXPORT_SYMBOL_GPL(ata_timing_compute);
6887EXPORT_SYMBOL_GPL(ata_timing_merge);
6888
6889#ifdef CONFIG_PCI
6890EXPORT_SYMBOL_GPL(pci_test_config_bits);
d491b27b 6891EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
1626aeb8 6892EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
21b0ad4f 6893EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
6894EXPORT_SYMBOL_GPL(ata_pci_init_one);
6895EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6896#ifdef CONFIG_PM
6897EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6898EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6899EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6900EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6901#endif /* CONFIG_PM */
6902EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6903EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6904#endif /* CONFIG_PCI */
9b847548 6905
ece1d636 6906EXPORT_SYMBOL_GPL(ata_eng_timeout);
6907EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6908EXPORT_SYMBOL_GPL(ata_port_abort);
6909EXPORT_SYMBOL_GPL(ata_port_freeze);
6910EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6911EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6912EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6913EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6914EXPORT_SYMBOL_GPL(ata_do_eh);
6915EXPORT_SYMBOL_GPL(ata_irq_on);
6916EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6917EXPORT_SYMBOL_GPL(ata_irq_ack);
6918EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 6919EXPORT_SYMBOL_GPL(ata_dev_try_classify);
6920
6921EXPORT_SYMBOL_GPL(ata_cable_40wire);
6922EXPORT_SYMBOL_GPL(ata_cable_80wire);
6923EXPORT_SYMBOL_GPL(ata_cable_unknown);
6924EXPORT_SYMBOL_GPL(ata_cable_sata);