]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
sata_nv: enable hotplug interrupt and fix some readl/readw mismatches
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5
JG
62#define DRV_VERSION "2.10" /* must be exactly four chars */
63
64
d7bb4cc7 65/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 69
3373efd8
TH
70static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
73static void ata_dev_xfermask(struct ata_device *dev);
1da177e4
LT
74
75static unsigned int ata_unique_id = 1;
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
c3c013a2
JG
88int libata_fua = 0;
89module_param_named(fua, libata_fua, int, 0444);
90MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
91
a8601e5f
AM
92static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
93module_param(ata_probe_timeout, int, 0444);
94MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
95
11ef697b
KCA
96int noacpi;
97module_param(noacpi, int, 0444);
98MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
99
1da177e4
LT
100MODULE_AUTHOR("Jeff Garzik");
101MODULE_DESCRIPTION("Library module for ATA devices");
102MODULE_LICENSE("GPL");
103MODULE_VERSION(DRV_VERSION);
104
0baab86b 105
1da177e4
LT
106/**
107 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
108 * @tf: Taskfile to convert
109 * @fis: Buffer into which data will output
110 * @pmp: Port multiplier port
111 *
112 * Converts a standard ATA taskfile to a Serial ATA
113 * FIS structure (Register - Host to Device).
114 *
115 * LOCKING:
116 * Inherited from caller.
117 */
118
057ace5e 119void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
1da177e4
LT
120{
121 fis[0] = 0x27; /* Register - Host to Device FIS */
122 fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
123 bit 7 indicates Command FIS */
124 fis[2] = tf->command;
125 fis[3] = tf->feature;
126
127 fis[4] = tf->lbal;
128 fis[5] = tf->lbam;
129 fis[6] = tf->lbah;
130 fis[7] = tf->device;
131
132 fis[8] = tf->hob_lbal;
133 fis[9] = tf->hob_lbam;
134 fis[10] = tf->hob_lbah;
135 fis[11] = tf->hob_feature;
136
137 fis[12] = tf->nsect;
138 fis[13] = tf->hob_nsect;
139 fis[14] = 0;
140 fis[15] = tf->ctl;
141
142 fis[16] = 0;
143 fis[17] = 0;
144 fis[18] = 0;
145 fis[19] = 0;
146}
147
148/**
149 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
150 * @fis: Buffer from which data will be input
151 * @tf: Taskfile to output
152 *
e12a1be6 153 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
154 *
155 * LOCKING:
156 * Inherited from caller.
157 */
158
057ace5e 159void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
160{
161 tf->command = fis[2]; /* status */
162 tf->feature = fis[3]; /* error */
163
164 tf->lbal = fis[4];
165 tf->lbam = fis[5];
166 tf->lbah = fis[6];
167 tf->device = fis[7];
168
169 tf->hob_lbal = fis[8];
170 tf->hob_lbam = fis[9];
171 tf->hob_lbah = fis[10];
172
173 tf->nsect = fis[12];
174 tf->hob_nsect = fis[13];
175}
176
8cbd6df1
AL
177static const u8 ata_rw_cmds[] = {
178 /* pio multi */
179 ATA_CMD_READ_MULTI,
180 ATA_CMD_WRITE_MULTI,
181 ATA_CMD_READ_MULTI_EXT,
182 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
183 0,
184 0,
185 0,
186 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
187 /* pio */
188 ATA_CMD_PIO_READ,
189 ATA_CMD_PIO_WRITE,
190 ATA_CMD_PIO_READ_EXT,
191 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
192 0,
193 0,
194 0,
195 0,
8cbd6df1
AL
196 /* dma */
197 ATA_CMD_READ,
198 ATA_CMD_WRITE,
199 ATA_CMD_READ_EXT,
9a3dccc4
TH
200 ATA_CMD_WRITE_EXT,
201 0,
202 0,
203 0,
204 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 205};
1da177e4
LT
206
207/**
8cbd6df1 208 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
209 * @tf: command to examine and configure
210 * @dev: device tf belongs to
1da177e4 211 *
2e9edbf8 212 * Examine the device configuration and tf->flags to calculate
8cbd6df1 213 * the proper read/write commands and protocol to use.
1da177e4
LT
214 *
215 * LOCKING:
216 * caller.
217 */
bd056d7e 218static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 219{
9a3dccc4 220 u8 cmd;
1da177e4 221
9a3dccc4 222 int index, fua, lba48, write;
2e9edbf8 223
9a3dccc4 224 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
225 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
226 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 227
8cbd6df1
AL
228 if (dev->flags & ATA_DFLAG_PIO) {
229 tf->protocol = ATA_PROT_PIO;
9a3dccc4 230 index = dev->multi_count ? 0 : 8;
bd056d7e 231 } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
232 /* Unable to use DMA due to host limitation */
233 tf->protocol = ATA_PROT_PIO;
0565c26d 234 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
235 } else {
236 tf->protocol = ATA_PROT_DMA;
9a3dccc4 237 index = 16;
8cbd6df1 238 }
1da177e4 239
9a3dccc4
TH
240 cmd = ata_rw_cmds[index + fua + lba48 + write];
241 if (cmd) {
242 tf->command = cmd;
243 return 0;
244 }
245 return -1;
1da177e4
LT
246}
247
35b649fe
TH
248/**
249 * ata_tf_read_block - Read block address from ATA taskfile
250 * @tf: ATA taskfile of interest
251 * @dev: ATA device @tf belongs to
252 *
253 * LOCKING:
254 * None.
255 *
256 * Read block address from @tf. This function can handle all
257 * three address formats - LBA, LBA48 and CHS. tf->protocol and
258 * flags select the address format to use.
259 *
260 * RETURNS:
261 * Block address read from @tf.
262 */
263u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
264{
265 u64 block = 0;
266
267 if (tf->flags & ATA_TFLAG_LBA) {
268 if (tf->flags & ATA_TFLAG_LBA48) {
269 block |= (u64)tf->hob_lbah << 40;
270 block |= (u64)tf->hob_lbam << 32;
271 block |= tf->hob_lbal << 24;
272 } else
273 block |= (tf->device & 0xf) << 24;
274
275 block |= tf->lbah << 16;
276 block |= tf->lbam << 8;
277 block |= tf->lbal;
278 } else {
279 u32 cyl, head, sect;
280
281 cyl = tf->lbam | (tf->lbah << 8);
282 head = tf->device & 0xf;
283 sect = tf->lbal;
284
285 block = (cyl * dev->heads + head) * dev->sectors + sect;
286 }
287
288 return block;
289}
290
bd056d7e
TH
291/**
292 * ata_build_rw_tf - Build ATA taskfile for given read/write request
293 * @tf: Target ATA taskfile
294 * @dev: ATA device @tf belongs to
295 * @block: Block address
296 * @n_block: Number of blocks
297 * @tf_flags: RW/FUA etc...
298 * @tag: tag
299 *
300 * LOCKING:
301 * None.
302 *
303 * Build ATA taskfile @tf for read/write request described by
304 * @block, @n_block, @tf_flags and @tag on @dev.
305 *
306 * RETURNS:
307 *
308 * 0 on success, -ERANGE if the request is too large for @dev,
309 * -EINVAL if the request is invalid.
310 */
311int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
312 u64 block, u32 n_block, unsigned int tf_flags,
313 unsigned int tag)
314{
315 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
316 tf->flags |= tf_flags;
317
6d1245bf 318 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
319 /* yay, NCQ */
320 if (!lba_48_ok(block, n_block))
321 return -ERANGE;
322
323 tf->protocol = ATA_PROT_NCQ;
324 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
325
326 if (tf->flags & ATA_TFLAG_WRITE)
327 tf->command = ATA_CMD_FPDMA_WRITE;
328 else
329 tf->command = ATA_CMD_FPDMA_READ;
330
331 tf->nsect = tag << 3;
332 tf->hob_feature = (n_block >> 8) & 0xff;
333 tf->feature = n_block & 0xff;
334
335 tf->hob_lbah = (block >> 40) & 0xff;
336 tf->hob_lbam = (block >> 32) & 0xff;
337 tf->hob_lbal = (block >> 24) & 0xff;
338 tf->lbah = (block >> 16) & 0xff;
339 tf->lbam = (block >> 8) & 0xff;
340 tf->lbal = block & 0xff;
341
342 tf->device = 1 << 6;
343 if (tf->flags & ATA_TFLAG_FUA)
344 tf->device |= 1 << 7;
345 } else if (dev->flags & ATA_DFLAG_LBA) {
346 tf->flags |= ATA_TFLAG_LBA;
347
348 if (lba_28_ok(block, n_block)) {
349 /* use LBA28 */
350 tf->device |= (block >> 24) & 0xf;
351 } else if (lba_48_ok(block, n_block)) {
352 if (!(dev->flags & ATA_DFLAG_LBA48))
353 return -ERANGE;
354
355 /* use LBA48 */
356 tf->flags |= ATA_TFLAG_LBA48;
357
358 tf->hob_nsect = (n_block >> 8) & 0xff;
359
360 tf->hob_lbah = (block >> 40) & 0xff;
361 tf->hob_lbam = (block >> 32) & 0xff;
362 tf->hob_lbal = (block >> 24) & 0xff;
363 } else
364 /* request too large even for LBA48 */
365 return -ERANGE;
366
367 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
368 return -EINVAL;
369
370 tf->nsect = n_block & 0xff;
371
372 tf->lbah = (block >> 16) & 0xff;
373 tf->lbam = (block >> 8) & 0xff;
374 tf->lbal = block & 0xff;
375
376 tf->device |= ATA_LBA;
377 } else {
378 /* CHS */
379 u32 sect, head, cyl, track;
380
381 /* The request -may- be too large for CHS addressing. */
382 if (!lba_28_ok(block, n_block))
383 return -ERANGE;
384
385 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
386 return -EINVAL;
387
388 /* Convert LBA to CHS */
389 track = (u32)block / dev->sectors;
390 cyl = track / dev->heads;
391 head = track % dev->heads;
392 sect = (u32)block % dev->sectors + 1;
393
394 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
395 (u32)block, track, cyl, head, sect);
396
397 /* Check whether the converted CHS can fit.
398 Cylinder: 0-65535
399 Head: 0-15
400 Sector: 1-255*/
401 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
402 return -ERANGE;
403
404 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
405 tf->lbal = sect;
406 tf->lbam = cyl;
407 tf->lbah = cyl >> 8;
408 tf->device |= head;
409 }
410
411 return 0;
412}
413
cb95d562
TH
414/**
415 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
416 * @pio_mask: pio_mask
417 * @mwdma_mask: mwdma_mask
418 * @udma_mask: udma_mask
419 *
420 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
421 * unsigned int xfer_mask.
422 *
423 * LOCKING:
424 * None.
425 *
426 * RETURNS:
427 * Packed xfer_mask.
428 */
429static unsigned int ata_pack_xfermask(unsigned int pio_mask,
430 unsigned int mwdma_mask,
431 unsigned int udma_mask)
432{
433 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
434 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
435 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
436}
437
c0489e4e
TH
438/**
439 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
440 * @xfer_mask: xfer_mask to unpack
441 * @pio_mask: resulting pio_mask
442 * @mwdma_mask: resulting mwdma_mask
443 * @udma_mask: resulting udma_mask
444 *
445 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
446 * Any NULL distination masks will be ignored.
447 */
448static void ata_unpack_xfermask(unsigned int xfer_mask,
449 unsigned int *pio_mask,
450 unsigned int *mwdma_mask,
451 unsigned int *udma_mask)
452{
453 if (pio_mask)
454 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
455 if (mwdma_mask)
456 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
457 if (udma_mask)
458 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
459}
460
cb95d562 461static const struct ata_xfer_ent {
be9a50c8 462 int shift, bits;
cb95d562
TH
463 u8 base;
464} ata_xfer_tbl[] = {
465 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
466 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
467 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
468 { -1, },
469};
470
471/**
472 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
473 * @xfer_mask: xfer_mask of interest
474 *
475 * Return matching XFER_* value for @xfer_mask. Only the highest
476 * bit of @xfer_mask is considered.
477 *
478 * LOCKING:
479 * None.
480 *
481 * RETURNS:
482 * Matching XFER_* value, 0 if no match found.
483 */
484static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
485{
486 int highbit = fls(xfer_mask) - 1;
487 const struct ata_xfer_ent *ent;
488
489 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
490 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
491 return ent->base + highbit - ent->shift;
492 return 0;
493}
494
495/**
496 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
497 * @xfer_mode: XFER_* of interest
498 *
499 * Return matching xfer_mask for @xfer_mode.
500 *
501 * LOCKING:
502 * None.
503 *
504 * RETURNS:
505 * Matching xfer_mask, 0 if no match found.
506 */
507static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
508{
509 const struct ata_xfer_ent *ent;
510
511 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
512 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
513 return 1 << (ent->shift + xfer_mode - ent->base);
514 return 0;
515}
516
517/**
518 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
519 * @xfer_mode: XFER_* of interest
520 *
521 * Return matching xfer_shift for @xfer_mode.
522 *
523 * LOCKING:
524 * None.
525 *
526 * RETURNS:
527 * Matching xfer_shift, -1 if no match found.
528 */
529static int ata_xfer_mode2shift(unsigned int xfer_mode)
530{
531 const struct ata_xfer_ent *ent;
532
533 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
534 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
535 return ent->shift;
536 return -1;
537}
538
1da177e4 539/**
1da7b0d0
TH
540 * ata_mode_string - convert xfer_mask to string
541 * @xfer_mask: mask of bits supported; only highest bit counts.
1da177e4
LT
542 *
543 * Determine string which represents the highest speed
1da7b0d0 544 * (highest bit in @modemask).
1da177e4
LT
545 *
546 * LOCKING:
547 * None.
548 *
549 * RETURNS:
550 * Constant C string representing highest speed listed in
1da7b0d0 551 * @mode_mask, or the constant C string "<n/a>".
1da177e4 552 */
1da7b0d0 553static const char *ata_mode_string(unsigned int xfer_mask)
1da177e4 554{
75f554bc
TH
555 static const char * const xfer_mode_str[] = {
556 "PIO0",
557 "PIO1",
558 "PIO2",
559 "PIO3",
560 "PIO4",
b352e57d
AC
561 "PIO5",
562 "PIO6",
75f554bc
TH
563 "MWDMA0",
564 "MWDMA1",
565 "MWDMA2",
b352e57d
AC
566 "MWDMA3",
567 "MWDMA4",
75f554bc
TH
568 "UDMA/16",
569 "UDMA/25",
570 "UDMA/33",
571 "UDMA/44",
572 "UDMA/66",
573 "UDMA/100",
574 "UDMA/133",
575 "UDMA7",
576 };
1da7b0d0 577 int highbit;
1da177e4 578
1da7b0d0
TH
579 highbit = fls(xfer_mask) - 1;
580 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
581 return xfer_mode_str[highbit];
1da177e4 582 return "<n/a>";
1da177e4
LT
583}
584
4c360c81
TH
585static const char *sata_spd_string(unsigned int spd)
586{
587 static const char * const spd_str[] = {
588 "1.5 Gbps",
589 "3.0 Gbps",
590 };
591
592 if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
593 return "<unknown>";
594 return spd_str[spd - 1];
595}
596
3373efd8 597void ata_dev_disable(struct ata_device *dev)
0b8efb0a 598{
0dd4b21f 599 if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
f15a1daf 600 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
601 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
602 ATA_DNXFER_QUIET);
0b8efb0a
TH
603 dev->class++;
604 }
605}
606
1da177e4 607/**
0d5ff566 608 * ata_devchk - PATA device presence detection
1da177e4
LT
609 * @ap: ATA channel to examine
610 * @device: Device to examine (starting at zero)
611 *
612 * This technique was originally described in
613 * Hale Landis's ATADRVR (www.ata-atapi.com), and
614 * later found its way into the ATA/ATAPI spec.
615 *
616 * Write a pattern to the ATA shadow registers,
617 * and if a device is present, it will respond by
618 * correctly storing and echoing back the
619 * ATA shadow register contents.
620 *
621 * LOCKING:
622 * caller.
623 */
624
0d5ff566 625static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
626{
627 struct ata_ioports *ioaddr = &ap->ioaddr;
628 u8 nsect, lbal;
629
630 ap->ops->dev_select(ap, device);
631
0d5ff566
TH
632 iowrite8(0x55, ioaddr->nsect_addr);
633 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 634
0d5ff566
TH
635 iowrite8(0xaa, ioaddr->nsect_addr);
636 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 637
0d5ff566
TH
638 iowrite8(0x55, ioaddr->nsect_addr);
639 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 640
0d5ff566
TH
641 nsect = ioread8(ioaddr->nsect_addr);
642 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
643
644 if ((nsect == 0x55) && (lbal == 0xaa))
645 return 1; /* we found a device */
646
647 return 0; /* nothing found */
648}
649
1da177e4
LT
650/**
651 * ata_dev_classify - determine device type based on ATA-spec signature
652 * @tf: ATA taskfile register set for device to be identified
653 *
654 * Determine from taskfile register contents whether a device is
655 * ATA or ATAPI, as per "Signature and persistence" section
656 * of ATA/PI spec (volume 1, sect 5.14).
657 *
658 * LOCKING:
659 * None.
660 *
661 * RETURNS:
662 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
663 * the event of failure.
664 */
665
057ace5e 666unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
667{
668 /* Apple's open source Darwin code hints that some devices only
669 * put a proper signature into the LBA mid/high registers,
670 * So, we only check those. It's sufficient for uniqueness.
671 */
672
673 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
674 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
675 DPRINTK("found ATA device by sig\n");
676 return ATA_DEV_ATA;
677 }
678
679 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
680 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
681 DPRINTK("found ATAPI device by sig\n");
682 return ATA_DEV_ATAPI;
683 }
684
685 DPRINTK("unknown device\n");
686 return ATA_DEV_UNKNOWN;
687}
688
689/**
690 * ata_dev_try_classify - Parse returned ATA device signature
691 * @ap: ATA channel to examine
692 * @device: Device to examine (starting at zero)
b4dc7623 693 * @r_err: Value of error register on completion
1da177e4
LT
694 *
695 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
696 * an ATA/ATAPI-defined set of values is placed in the ATA
697 * shadow registers, indicating the results of device detection
698 * and diagnostics.
699 *
700 * Select the ATA device, and read the values from the ATA shadow
701 * registers. Then parse according to the Error register value,
702 * and the spec-defined values examined by ata_dev_classify().
703 *
704 * LOCKING:
705 * caller.
b4dc7623
TH
706 *
707 * RETURNS:
708 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4
LT
709 */
710
a619f981 711unsigned int
b4dc7623 712ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
1da177e4 713{
1da177e4
LT
714 struct ata_taskfile tf;
715 unsigned int class;
716 u8 err;
717
718 ap->ops->dev_select(ap, device);
719
720 memset(&tf, 0, sizeof(tf));
721
1da177e4 722 ap->ops->tf_read(ap, &tf);
0169e284 723 err = tf.feature;
b4dc7623
TH
724 if (r_err)
725 *r_err = err;
1da177e4 726
93590859
AC
727 /* see if device passed diags: if master then continue and warn later */
728 if (err == 0 && device == 0)
729 /* diagnostic fail : do nothing _YET_ */
730 ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
731 else if (err == 1)
1da177e4
LT
732 /* do nothing */ ;
733 else if ((device == 0) && (err == 0x81))
734 /* do nothing */ ;
735 else
b4dc7623 736 return ATA_DEV_NONE;
1da177e4 737
b4dc7623 738 /* determine if device is ATA or ATAPI */
1da177e4 739 class = ata_dev_classify(&tf);
b4dc7623 740
1da177e4 741 if (class == ATA_DEV_UNKNOWN)
b4dc7623 742 return ATA_DEV_NONE;
1da177e4 743 if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
b4dc7623
TH
744 return ATA_DEV_NONE;
745 return class;
1da177e4
LT
746}
747
748/**
6a62a04d 749 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
750 * @id: IDENTIFY DEVICE results we will examine
751 * @s: string into which data is output
752 * @ofs: offset into identify device page
753 * @len: length of string to return. must be an even number.
754 *
755 * The strings in the IDENTIFY DEVICE page are broken up into
756 * 16-bit chunks. Run through the string, and output each
757 * 8-bit chunk linearly, regardless of platform.
758 *
759 * LOCKING:
760 * caller.
761 */
762
6a62a04d
TH
763void ata_id_string(const u16 *id, unsigned char *s,
764 unsigned int ofs, unsigned int len)
1da177e4
LT
765{
766 unsigned int c;
767
768 while (len > 0) {
769 c = id[ofs] >> 8;
770 *s = c;
771 s++;
772
773 c = id[ofs] & 0xff;
774 *s = c;
775 s++;
776
777 ofs++;
778 len -= 2;
779 }
780}
781
0e949ff3 782/**
6a62a04d 783 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
784 * @id: IDENTIFY DEVICE results we will examine
785 * @s: string into which data is output
786 * @ofs: offset into identify device page
787 * @len: length of string to return. must be an odd number.
788 *
6a62a04d 789 * This function is identical to ata_id_string except that it
0e949ff3
TH
790 * trims trailing spaces and terminates the resulting string with
791 * null. @len must be actual maximum length (even number) + 1.
792 *
793 * LOCKING:
794 * caller.
795 */
6a62a04d
TH
796void ata_id_c_string(const u16 *id, unsigned char *s,
797 unsigned int ofs, unsigned int len)
0e949ff3
TH
798{
799 unsigned char *p;
800
801 WARN_ON(!(len & 1));
802
6a62a04d 803 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
804
805 p = s + strnlen(s, len - 1);
806 while (p > s && p[-1] == ' ')
807 p--;
808 *p = '\0';
809}
0baab86b 810
2940740b
TH
811static u64 ata_id_n_sectors(const u16 *id)
812{
813 if (ata_id_has_lba(id)) {
814 if (ata_id_has_lba48(id))
815 return ata_id_u64(id, 100);
816 else
817 return ata_id_u32(id, 60);
818 } else {
819 if (ata_id_current_chs_valid(id))
820 return ata_id_u32(id, 57);
821 else
822 return id[1] * id[3] * id[6];
823 }
824}
825
0baab86b
EF
826/**
827 * ata_noop_dev_select - Select device 0/1 on ATA bus
828 * @ap: ATA channel to manipulate
829 * @device: ATA device (numbered from zero) to select
830 *
831 * This function performs no actual function.
832 *
833 * May be used as the dev_select() entry in ata_port_operations.
834 *
835 * LOCKING:
836 * caller.
837 */
1da177e4
LT
838void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
839{
840}
841
0baab86b 842
1da177e4
LT
843/**
844 * ata_std_dev_select - Select device 0/1 on ATA bus
845 * @ap: ATA channel to manipulate
846 * @device: ATA device (numbered from zero) to select
847 *
848 * Use the method defined in the ATA specification to
849 * make either device 0, or device 1, active on the
0baab86b
EF
850 * ATA channel. Works with both PIO and MMIO.
851 *
852 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
853 *
854 * LOCKING:
855 * caller.
856 */
857
858void ata_std_dev_select (struct ata_port *ap, unsigned int device)
859{
860 u8 tmp;
861
862 if (device == 0)
863 tmp = ATA_DEVICE_OBS;
864 else
865 tmp = ATA_DEVICE_OBS | ATA_DEV1;
866
0d5ff566 867 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
868 ata_pause(ap); /* needed; also flushes, for mmio */
869}
870
871/**
872 * ata_dev_select - Select device 0/1 on ATA bus
873 * @ap: ATA channel to manipulate
874 * @device: ATA device (numbered from zero) to select
875 * @wait: non-zero to wait for Status register BSY bit to clear
876 * @can_sleep: non-zero if context allows sleeping
877 *
878 * Use the method defined in the ATA specification to
879 * make either device 0, or device 1, active on the
880 * ATA channel.
881 *
882 * This is a high-level version of ata_std_dev_select(),
883 * which additionally provides the services of inserting
884 * the proper pauses and status polling, where needed.
885 *
886 * LOCKING:
887 * caller.
888 */
889
890void ata_dev_select(struct ata_port *ap, unsigned int device,
891 unsigned int wait, unsigned int can_sleep)
892{
88574551 893 if (ata_msg_probe(ap))
0dd4b21f 894 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, ata%u: "
88574551 895 "device %u, wait %u\n", ap->id, device, wait);
1da177e4
LT
896
897 if (wait)
898 ata_wait_idle(ap);
899
900 ap->ops->dev_select(ap, device);
901
902 if (wait) {
903 if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
904 msleep(150);
905 ata_wait_idle(ap);
906 }
907}
908
909/**
910 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 911 * @id: IDENTIFY DEVICE page to dump
1da177e4 912 *
0bd3300a
TH
913 * Dump selected 16-bit words from the given IDENTIFY DEVICE
914 * page.
1da177e4
LT
915 *
916 * LOCKING:
917 * caller.
918 */
919
0bd3300a 920static inline void ata_dump_id(const u16 *id)
1da177e4
LT
921{
922 DPRINTK("49==0x%04x "
923 "53==0x%04x "
924 "63==0x%04x "
925 "64==0x%04x "
926 "75==0x%04x \n",
0bd3300a
TH
927 id[49],
928 id[53],
929 id[63],
930 id[64],
931 id[75]);
1da177e4
LT
932 DPRINTK("80==0x%04x "
933 "81==0x%04x "
934 "82==0x%04x "
935 "83==0x%04x "
936 "84==0x%04x \n",
0bd3300a
TH
937 id[80],
938 id[81],
939 id[82],
940 id[83],
941 id[84]);
1da177e4
LT
942 DPRINTK("88==0x%04x "
943 "93==0x%04x\n",
0bd3300a
TH
944 id[88],
945 id[93]);
1da177e4
LT
946}
947
cb95d562
TH
948/**
949 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
950 * @id: IDENTIFY data to compute xfer mask from
951 *
952 * Compute the xfermask for this device. This is not as trivial
953 * as it seems if we must consider early devices correctly.
954 *
955 * FIXME: pre IDE drive timing (do we care ?).
956 *
957 * LOCKING:
958 * None.
959 *
960 * RETURNS:
961 * Computed xfermask
962 */
963static unsigned int ata_id_xfermask(const u16 *id)
964{
965 unsigned int pio_mask, mwdma_mask, udma_mask;
966
967 /* Usual case. Word 53 indicates word 64 is valid */
968 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
969 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
970 pio_mask <<= 3;
971 pio_mask |= 0x7;
972 } else {
973 /* If word 64 isn't valid then Word 51 high byte holds
974 * the PIO timing number for the maximum. Turn it into
975 * a mask.
976 */
7a0f1c8a 977 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
978 if (mode < 5) /* Valid PIO range */
979 pio_mask = (2 << mode) - 1;
980 else
981 pio_mask = 1;
cb95d562
TH
982
983 /* But wait.. there's more. Design your standards by
984 * committee and you too can get a free iordy field to
985 * process. However its the speeds not the modes that
986 * are supported... Note drivers using the timing API
987 * will get this right anyway
988 */
989 }
990
991 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 992
b352e57d
AC
993 if (ata_id_is_cfa(id)) {
994 /*
995 * Process compact flash extended modes
996 */
997 int pio = id[163] & 0x7;
998 int dma = (id[163] >> 3) & 7;
999
1000 if (pio)
1001 pio_mask |= (1 << 5);
1002 if (pio > 1)
1003 pio_mask |= (1 << 6);
1004 if (dma)
1005 mwdma_mask |= (1 << 3);
1006 if (dma > 1)
1007 mwdma_mask |= (1 << 4);
1008 }
1009
fb21f0d0
TH
1010 udma_mask = 0;
1011 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1012 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1013
1014 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1015}
1016
86e45b6b
TH
1017/**
1018 * ata_port_queue_task - Queue port_task
1019 * @ap: The ata_port to queue port_task for
e2a7f77a 1020 * @fn: workqueue function to be scheduled
65f27f38 1021 * @data: data for @fn to use
e2a7f77a 1022 * @delay: delay time for workqueue function
86e45b6b
TH
1023 *
1024 * Schedule @fn(@data) for execution after @delay jiffies using
1025 * port_task. There is one port_task per port and it's the
1026 * user(low level driver)'s responsibility to make sure that only
1027 * one task is active at any given time.
1028 *
1029 * libata core layer takes care of synchronization between
1030 * port_task and EH. ata_port_queue_task() may be ignored for EH
1031 * synchronization.
1032 *
1033 * LOCKING:
1034 * Inherited from caller.
1035 */
65f27f38 1036void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1037 unsigned long delay)
1038{
1039 int rc;
1040
b51e9e5d 1041 if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
86e45b6b
TH
1042 return;
1043
65f27f38
DH
1044 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1045 ap->port_task_data = data;
86e45b6b 1046
52bad64d 1047 rc = queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1048
1049 /* rc == 0 means that another user is using port task */
1050 WARN_ON(rc == 0);
1051}
1052
1053/**
1054 * ata_port_flush_task - Flush port_task
1055 * @ap: The ata_port to flush port_task for
1056 *
1057 * After this function completes, port_task is guranteed not to
1058 * be running or scheduled.
1059 *
1060 * LOCKING:
1061 * Kernel thread context (may sleep)
1062 */
1063void ata_port_flush_task(struct ata_port *ap)
1064{
1065 unsigned long flags;
1066
1067 DPRINTK("ENTER\n");
1068
ba6a1308 1069 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 1070 ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
ba6a1308 1071 spin_unlock_irqrestore(ap->lock, flags);
86e45b6b
TH
1072
1073 DPRINTK("flush #1\n");
1074 flush_workqueue(ata_wq);
1075
1076 /*
1077 * At this point, if a task is running, it's guaranteed to see
1078 * the FLUSH flag; thus, it will never queue pio tasks again.
1079 * Cancel and flush.
1080 */
1081 if (!cancel_delayed_work(&ap->port_task)) {
0dd4b21f 1082 if (ata_msg_ctl(ap))
88574551
TH
1083 ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
1084 __FUNCTION__);
86e45b6b
TH
1085 flush_workqueue(ata_wq);
1086 }
1087
ba6a1308 1088 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 1089 ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
ba6a1308 1090 spin_unlock_irqrestore(ap->lock, flags);
86e45b6b 1091
0dd4b21f
BP
1092 if (ata_msg_ctl(ap))
1093 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1094}
1095
7102d230 1096static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1097{
77853bf2 1098 struct completion *waiting = qc->private_data;
a2a7a662 1099
a2a7a662 1100 complete(waiting);
a2a7a662
TH
1101}
1102
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and clear the port's active command bookkeeping so the
	 * internal command runs alone; restored below before return.
	 */
	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				"zero err_mask for failed "
				"internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1270
2432697b 1271/**
33480a0e 1272 * ata_exec_internal - execute libata internal command
2432697b
TH
1273 * @dev: Device to which the command is sent
1274 * @tf: Taskfile registers for the command and the result
1275 * @cdb: CDB for packet command
1276 * @dma_dir: Data tranfer direction of the command
1277 * @buf: Data buffer of the command
1278 * @buflen: Length of data buffer
1279 *
1280 * Wrapper around ata_exec_internal_sg() which takes simple
1281 * buffer instead of sg list.
1282 *
1283 * LOCKING:
1284 * None. Should be called with kernel context, might sleep.
1285 *
1286 * RETURNS:
1287 * Zero on success, AC_ERR_* mask on failure
1288 */
1289unsigned ata_exec_internal(struct ata_device *dev,
1290 struct ata_taskfile *tf, const u8 *cdb,
1291 int dma_dir, void *buf, unsigned int buflen)
1292{
33480a0e
TH
1293 struct scatterlist *psg = NULL, sg;
1294 unsigned int n_elem = 0;
2432697b 1295
33480a0e
TH
1296 if (dma_dir != DMA_NONE) {
1297 WARN_ON(!buf);
1298 sg_init_one(&sg, buf, buflen);
1299 psg = &sg;
1300 n_elem++;
1301 }
2432697b 1302
33480a0e 1303 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1304}
1305
977e6b9f
TH
1306/**
1307 * ata_do_simple_cmd - execute simple internal command
1308 * @dev: Device to which the command is sent
1309 * @cmd: Opcode to execute
1310 *
1311 * Execute a 'simple' command, that only consists of the opcode
1312 * 'cmd' itself, without filling any other registers
1313 *
1314 * LOCKING:
1315 * Kernel thread context (may sleep).
1316 *
1317 * RETURNS:
1318 * Zero on success, AC_ERR_* mask on failure
e58eb583 1319 */
77b08fb5 1320unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1321{
1322 struct ata_taskfile tf;
e58eb583
TH
1323
1324 ata_tf_init(dev, &tf);
1325
1326 tf.command = cmd;
1327 tf.flags |= ATA_TFLAG_DEVICE;
1328 tf.protocol = ATA_PROT_NODATA;
1329
977e6b9f 1330 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1331}
1332
1bc4ccff
AC
1333/**
1334 * ata_pio_need_iordy - check if iordy needed
1335 * @adev: ATA device
1336 *
1337 * Check if the current speed of the device requires IORDY. Used
1338 * by various controllers for chip configuration.
1339 */
1340
1341unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1342{
1343 int pio;
1344 int speed = adev->pio_mode - XFER_PIO_0;
1345
1346 if (speed < 2)
1347 return 0;
1348 if (speed > 2)
1349 return 1;
2e9edbf8 1350
1bc4ccff
AC
1351 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1352
1353 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
1354 pio = adev->id[ATA_ID_EIDE_PIO];
1355 /* Is the speed faster than the drive allows non IORDY ? */
1356 if (pio) {
1357 /* This is cycle times not frequency - watch the logic! */
1358 if (pio > 240) /* PIO2 is 240nS per cycle */
1359 return 1;
1360 return 0;
1361 }
1362 }
1363 return 0;
1364}
1365
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY opcode matching the (assumed) device class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the returned data must match the class we asked for */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1491
3373efd8 1492static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1493{
3373efd8 1494 return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1495}
1496
a6e6ce8e
TH
1497static void ata_dev_config_ncq(struct ata_device *dev,
1498 char *desc, size_t desc_sz)
1499{
1500 struct ata_port *ap = dev->ap;
1501 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1502
1503 if (!ata_id_has_ncq(dev->id)) {
1504 desc[0] = '\0';
1505 return;
1506 }
6919a0a6
AC
1507 if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
1508 snprintf(desc, desc_sz, "NCQ (not used)");
1509 return;
1510 }
a6e6ce8e 1511 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1512 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1513 dev->flags |= ATA_DFLAG_NCQ;
1514 }
1515
1516 if (hdepth >= ddepth)
1517 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1518 else
1519 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1520}
1521
e6d902a3
BK
1522static void ata_set_port_max_cmd_len(struct ata_port *ap)
1523{
1524 int i;
1525
cca3974e
JG
1526 if (ap->scsi_host) {
1527 unsigned int len = 0;
1528
e6d902a3 1529 for (i = 0; i < ATA_MAX_DEVICES; i++)
cca3974e
JG
1530 len = max(len, ap->device[i].cdb_len);
1531
1532 ap->scsi_host->max_cmd_len = len;
e6d902a3
BK
1533 }
1534}
1535
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO,
			       "%s: ENTER/EXIT (host %u, dev %u) -- nodev\n",
			       __FUNCTION__, ap->id, dev->devno);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER, host %u, dev %u\n",
			       __FUNCTION__, ap->id, dev->devno);

	/* set _SDD */
	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc) {
		/* non-fatal: warn and keep configuring */
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING, "ata%u: device %u supports DRM functions and may not be fully accessable.\n",
					ap->id, dev->devno);
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		/* word 59 bit 8 set => low byte holds the multi count */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* give the LLDD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
1761
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* keep the class the reset reported unless the port got
		 * disabled or the class is unknown
		 */
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device whose probe failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
1876
1877/**
0cba632b
JG
1878 * ata_port_probe - Mark port as enabled
1879 * @ap: Port for which we indicate enablement
1da177e4 1880 *
0cba632b
JG
1881 * Modify @ap data structure such that the system
1882 * thinks that the entire port is enabled.
1883 *
cca3974e 1884 * LOCKING: host lock, or some other form of
0cba632b 1885 * serialization.
1da177e4
LT
1886 */
1887
1888void ata_port_probe(struct ata_port *ap)
1889{
198e0fed 1890 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1891}
1892
3be680b7
TH
1893/**
1894 * sata_print_link_status - Print SATA link status
1895 * @ap: SATA port to printk link status about
1896 *
1897 * This function prints link speed and status of a SATA link.
1898 *
1899 * LOCKING:
1900 * None.
1901 */
1902static void sata_print_link_status(struct ata_port *ap)
1903{
6d5f9732 1904 u32 sstatus, scontrol, tmp;
3be680b7 1905
81952c54 1906 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1907 return;
81952c54 1908 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1909
81952c54 1910 if (ata_port_online(ap)) {
3be680b7 1911 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1912 ata_port_printk(ap, KERN_INFO,
1913 "SATA link up %s (SStatus %X SControl %X)\n",
1914 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1915 } else {
f15a1daf
TH
1916 ata_port_printk(ap, KERN_INFO,
1917 "SATA link down (SStatus %X SControl %X)\n",
1918 sstatus, scontrol);
3be680b7
TH
1919 }
1920}
1921
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary; DET == 1 means
	 * device presence detected but PHY communication not yet
	 * established, so keep polling up to the 5s timeout.
	 */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device to come out of BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
1976
1977/**
780a87f7
JG
1978 * sata_phy_reset - Reset SATA bus.
1979 * @ap: SATA port associated with target SATA PHY.
1da177e4 1980 *
780a87f7
JG
1981 * This function resets the SATA bus, and then probes
1982 * the bus for devices.
1da177e4
LT
1983 *
1984 * LOCKING:
0cba632b 1985 * PCI/etc. bus probe sem.
1da177e4
LT
1986 *
1987 */
1988void sata_phy_reset(struct ata_port *ap)
1989{
1990 __sata_phy_reset(ap);
198e0fed 1991 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
1992 return;
1993 ata_bus_reset(ap);
1994}
1995
ebdfca6e
AC
1996/**
1997 * ata_dev_pair - return other device on cable
ebdfca6e
AC
1998 * @adev: device
1999 *
2000 * Obtain the other device on the same cable, or if none is
2001 * present NULL is returned
2002 */
2e9edbf8 2003
3373efd8 2004struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2005{
3373efd8 2006 struct ata_port *ap = adev->ap;
ebdfca6e 2007 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2008 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2009 return NULL;
2010 return pair;
2011}
2012
1da177e4 2013/**
780a87f7
JG
2014 * ata_port_disable - Disable port.
2015 * @ap: Port to be disabled.
1da177e4 2016 *
780a87f7
JG
2017 * Modify @ap data structure such that the system
2018 * thinks that the entire port is disabled, and should
2019 * never attempt to probe or communicate with devices
2020 * on this port.
2021 *
cca3974e 2022 * LOCKING: host lock, or some other form of
780a87f7 2023 * serialization.
1da177e4
LT
2024 */
2025
2026void ata_port_disable(struct ata_port *ap)
2027{
2028 ap->device[0].class = ATA_DEV_NONE;
2029 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2030 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2031}
2032
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	/* drop the highest speed still allowed by the current limit */
	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* additionally cap the mask below the speed the link is
	 * currently running at (SStatus bits 7:4)
	 */
	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2077
3c567b7d 2078static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2079{
2080 u32 spd, limit;
2081
2082 if (ap->sata_spd_limit == UINT_MAX)
2083 limit = 0;
2084 else
2085 limit = fls(ap->sata_spd_limit);
2086
2087 spd = (*scontrol >> 4) & 0xf;
2088 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2089
2090 return spd != limit;
2091}
2092
2093/**
3c567b7d 2094 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2095 * @ap: Port in question
2096 *
2097 * Test whether the spd limit in SControl matches
2098 * @ap->sata_spd_limit. This function is used to determine
2099 * whether hardreset is necessary to apply SATA spd
2100 * configuration.
2101 *
2102 * LOCKING:
2103 * Inherited from caller.
2104 *
2105 * RETURNS:
2106 * 1 if SATA spd configuration is needed, 0 otherwise.
2107 */
3c567b7d 2108int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2109{
2110 u32 scontrol;
2111
81952c54 2112 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2113 return 0;
2114
3c567b7d 2115 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2116}
2117
2118/**
3c567b7d 2119 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2120 * @ap: Port to set SATA spd for
2121 *
2122 * Set SATA spd of @ap according to sata_spd_limit.
2123 *
2124 * LOCKING:
2125 * Inherited from caller.
2126 *
2127 * RETURNS:
2128 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2129 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2130 */
3c567b7d 2131int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2132{
2133 u32 scontrol;
81952c54 2134 int rc;
1c3fae4d 2135
81952c54
TH
2136 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2137 return rc;
1c3fae4d 2138
3c567b7d 2139 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2140 return 0;
2141
81952c54
TH
2142 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2143 return rc;
2144
1c3fae4d
TH
2145 return 1;
2146}
2147
452503f9
AC
2148/*
2149 * This mode timing computation functionality is ported over from
2150 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2151 */
2152/*
b352e57d 2153 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2154 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2155 * for UDMA6, which is currently supported only by Maxtor drives.
2156 *
2157 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2158 */
2159
2160static const struct ata_timing ata_timing[] = {
2161
2162 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2163 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2164 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2165 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2166
b352e57d
AC
2167 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2168 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2169 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2170 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2171 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2172
2173/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2174
452503f9
AC
2175 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2176 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2177 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2178
452503f9
AC
2179 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2180 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2181 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2182
b352e57d
AC
2183 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2184 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2185 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2186 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2187
2188 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2189 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2190 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2191
2192/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2193
2194 { 0xFF }
2195};
2196
2197#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2198#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2199
2200static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2201{
2202 q->setup = EZ(t->setup * 1000, T);
2203 q->act8b = EZ(t->act8b * 1000, T);
2204 q->rec8b = EZ(t->rec8b * 1000, T);
2205 q->cyc8b = EZ(t->cyc8b * 1000, T);
2206 q->active = EZ(t->active * 1000, T);
2207 q->recover = EZ(t->recover * 1000, T);
2208 q->cycle = EZ(t->cycle * 1000, T);
2209 q->udma = EZ(t->udma * 1000, UT);
2210}
2211
2212void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2213 struct ata_timing *m, unsigned int what)
2214{
2215 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2216 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2217 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2218 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2219 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2220 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2221 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2222 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2223}
2224
2225static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2226{
2227 const struct ata_timing *t;
2228
2229 for (t = ata_timing; t->mode != speed; t++)
91190758 2230 if (t->mode == 0xFF)
452503f9 2231 return NULL;
2e9edbf8 2232 return t;
452503f9
AC
2233}
2234
2235int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2236 struct ata_timing *t, int T, int UT)
2237{
2238 const struct ata_timing *s;
2239 struct ata_timing p;
2240
2241 /*
2e9edbf8 2242 * Find the mode.
75b1f2f8 2243 */
452503f9
AC
2244
2245 if (!(s = ata_timing_find_mode(speed)))
2246 return -EINVAL;
2247
75b1f2f8
AL
2248 memcpy(t, s, sizeof(*s));
2249
452503f9
AC
2250 /*
2251 * If the drive is an EIDE drive, it can tell us it needs extended
2252 * PIO/MW_DMA cycle timing.
2253 */
2254
2255 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2256 memset(&p, 0, sizeof(p));
2257 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2258 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2259 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2260 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2261 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2262 }
2263 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2264 }
2265
2266 /*
2267 * Convert the timing to bus clock counts.
2268 */
2269
75b1f2f8 2270 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2271
2272 /*
c893a3ae
RD
2273 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2274 * S.M.A.R.T * and some other commands. We have to ensure that the
2275 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2276 */
2277
fd3367af 2278 if (speed > XFER_PIO_6) {
452503f9
AC
2279 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2280 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2281 }
2282
2283 /*
c893a3ae 2284 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2285 */
2286
2287 if (t->act8b + t->rec8b < t->cyc8b) {
2288 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2289 t->rec8b = t->cyc8b - t->act8b;
2290 }
2291
2292 if (t->active + t->recover < t->cycle) {
2293 t->active += (t->cycle - (t->active + t->recover)) / 2;
2294 t->recover = t->cycle - t->active;
2295 }
2296
2297 return 0;
2298}
2299
cf176e1a
TH
2300/**
2301 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2302 * @dev: Device to adjust xfer masks
458337db 2303 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2304 *
2305 * Adjust xfer masks of @dev downward. Note that this function
2306 * does not apply the change. Invoking ata_set_mode() afterwards
2307 * will apply the limit.
2308 *
2309 * LOCKING:
2310 * Inherited from caller.
2311 *
2312 * RETURNS:
2313 * 0 on success, negative errno on failure
2314 */
458337db 2315int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2316{
458337db
TH
2317 char buf[32];
2318 unsigned int orig_mask, xfer_mask;
2319 unsigned int pio_mask, mwdma_mask, udma_mask;
2320 int quiet, highbit;
cf176e1a 2321
458337db
TH
2322 quiet = !!(sel & ATA_DNXFER_QUIET);
2323 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2324
458337db
TH
2325 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2326 dev->mwdma_mask,
2327 dev->udma_mask);
2328 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2329
458337db
TH
2330 switch (sel) {
2331 case ATA_DNXFER_PIO:
2332 highbit = fls(pio_mask) - 1;
2333 pio_mask &= ~(1 << highbit);
2334 break;
2335
2336 case ATA_DNXFER_DMA:
2337 if (udma_mask) {
2338 highbit = fls(udma_mask) - 1;
2339 udma_mask &= ~(1 << highbit);
2340 if (!udma_mask)
2341 return -ENOENT;
2342 } else if (mwdma_mask) {
2343 highbit = fls(mwdma_mask) - 1;
2344 mwdma_mask &= ~(1 << highbit);
2345 if (!mwdma_mask)
2346 return -ENOENT;
2347 }
2348 break;
2349
2350 case ATA_DNXFER_40C:
2351 udma_mask &= ATA_UDMA_MASK_40C;
2352 break;
2353
2354 case ATA_DNXFER_FORCE_PIO0:
2355 pio_mask &= 1;
2356 case ATA_DNXFER_FORCE_PIO:
2357 mwdma_mask = 0;
2358 udma_mask = 0;
2359 break;
2360
458337db
TH
2361 default:
2362 BUG();
2363 }
2364
2365 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2366
2367 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2368 return -ENOENT;
2369
2370 if (!quiet) {
2371 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2372 snprintf(buf, sizeof(buf), "%s:%s",
2373 ata_mode_string(xfer_mask),
2374 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2375 else
2376 snprintf(buf, sizeof(buf), "%s",
2377 ata_mode_string(xfer_mask));
2378
2379 ata_dev_printk(dev, KERN_WARNING,
2380 "limiting speed to %s\n", buf);
2381 }
cf176e1a
TH
2382
2383 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2384 &dev->udma_mask);
2385
cf176e1a 2386 return 0;
cf176e1a
TH
2387}
2388
3373efd8 2389static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2390{
baa1e78a 2391 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2392 unsigned int err_mask;
2393 int rc;
1da177e4 2394
e8384607 2395 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2396 if (dev->xfer_shift == ATA_SHIFT_PIO)
2397 dev->flags |= ATA_DFLAG_PIO;
2398
3373efd8 2399 err_mask = ata_dev_set_xfermode(dev);
11750a40
AC
2400 /* Old CFA may refuse this command, which is just fine */
2401 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2402 err_mask &= ~AC_ERR_DEV;
2403
83206a29 2404 if (err_mask) {
f15a1daf
TH
2405 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2406 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2407 return -EIO;
2408 }
1da177e4 2409
baa1e78a 2410 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2411 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2412 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2413 if (rc)
83206a29 2414 return rc;
48a8a14f 2415
23e71c3d
TH
2416 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2417 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2418
f15a1daf
TH
2419 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2420 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2421 return 0;
1da177e4
LT
2422}
2423
1da177e4
LT
2424/**
2425 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2426 * @ap: port on which timings will be programmed
e82cbdb9 2427 * @r_failed_dev: out paramter for failed device
1da177e4 2428 *
e82cbdb9
TH
2429 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2430 * ata_set_mode() fails, pointer to the failing device is
2431 * returned in @r_failed_dev.
780a87f7 2432 *
1da177e4 2433 * LOCKING:
0cba632b 2434 * PCI/etc. bus probe sem.
e82cbdb9
TH
2435 *
2436 * RETURNS:
2437 * 0 on success, negative errno otherwise
1da177e4 2438 */
1ad8e7f9 2439int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2440{
e8e0619f 2441 struct ata_device *dev;
e82cbdb9 2442 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2443
3adcebb2 2444 /* has private set_mode? */
b229a7b0
AC
2445 if (ap->ops->set_mode)
2446 return ap->ops->set_mode(ap, r_failed_dev);
3adcebb2 2447
a6d5a51c
TH
2448 /* step 1: calculate xfer_mask */
2449 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2450 unsigned int pio_mask, dma_mask;
a6d5a51c 2451
e8e0619f
TH
2452 dev = &ap->device[i];
2453
e1211e3f 2454 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2455 continue;
2456
3373efd8 2457 ata_dev_xfermask(dev);
1da177e4 2458
acf356b1
TH
2459 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2460 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2461 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2462 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2463
4f65977d 2464 found = 1;
5444a6f4
AC
2465 if (dev->dma_mode)
2466 used_dma = 1;
a6d5a51c 2467 }
4f65977d 2468 if (!found)
e82cbdb9 2469 goto out;
a6d5a51c
TH
2470
2471 /* step 2: always set host PIO timings */
e8e0619f
TH
2472 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2473 dev = &ap->device[i];
2474 if (!ata_dev_enabled(dev))
2475 continue;
2476
2477 if (!dev->pio_mode) {
f15a1daf 2478 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2479 rc = -EINVAL;
e82cbdb9 2480 goto out;
e8e0619f
TH
2481 }
2482
2483 dev->xfer_mode = dev->pio_mode;
2484 dev->xfer_shift = ATA_SHIFT_PIO;
2485 if (ap->ops->set_piomode)
2486 ap->ops->set_piomode(ap, dev);
2487 }
1da177e4 2488
a6d5a51c 2489 /* step 3: set host DMA timings */
e8e0619f
TH
2490 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2491 dev = &ap->device[i];
2492
2493 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2494 continue;
2495
2496 dev->xfer_mode = dev->dma_mode;
2497 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2498 if (ap->ops->set_dmamode)
2499 ap->ops->set_dmamode(ap, dev);
2500 }
1da177e4
LT
2501
2502 /* step 4: update devices' xfer mode */
83206a29 2503 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2504 dev = &ap->device[i];
1da177e4 2505
18d90deb 2506 /* don't update suspended devices' xfer mode */
02670bf3 2507 if (!ata_dev_ready(dev))
83206a29
TH
2508 continue;
2509
3373efd8 2510 rc = ata_dev_set_mode(dev);
5bbc53f4 2511 if (rc)
e82cbdb9 2512 goto out;
83206a29 2513 }
1da177e4 2514
e8e0619f
TH
2515 /* Record simplex status. If we selected DMA then the other
2516 * host channels are not permitted to do so.
5444a6f4 2517 */
cca3974e
JG
2518 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2519 ap->host->simplex_claimed = 1;
5444a6f4 2520
e8e0619f 2521 /* step5: chip specific finalisation */
1da177e4
LT
2522 if (ap->ops->post_set_mode)
2523 ap->ops->post_set_mode(ap);
2524
e82cbdb9
TH
2525 out:
2526 if (rc)
2527 *r_failed_dev = dev;
2528 return rc;
1da177e4
LT
2529}
2530
1fdffbce
JG
2531/**
2532 * ata_tf_to_host - issue ATA taskfile to host controller
2533 * @ap: port to which command is being issued
2534 * @tf: ATA taskfile register set
2535 *
2536 * Issues ATA taskfile register set to ATA host controller,
2537 * with proper synchronization with interrupt handler and
2538 * other threads.
2539 *
2540 * LOCKING:
cca3974e 2541 * spin_lock_irqsave(host lock)
1fdffbce
JG
2542 */
2543
2544static inline void ata_tf_to_host(struct ata_port *ap,
2545 const struct ata_taskfile *tf)
2546{
2547 ap->ops->tf_load(ap, tf);
2548 ap->ops->exec_command(ap, tf);
2549}
2550
1da177e4
LT
2551/**
2552 * ata_busy_sleep - sleep until BSY clears, or timeout
2553 * @ap: port containing status register to be polled
2554 * @tmout_pat: impatience timeout
2555 * @tmout: overall timeout
2556 *
780a87f7
JG
2557 * Sleep until ATA Status register bit BSY clears,
2558 * or a timeout occurs.
2559 *
d1adc1bb
TH
2560 * LOCKING:
2561 * Kernel thread context (may sleep).
2562 *
2563 * RETURNS:
2564 * 0 on success, -errno otherwise.
1da177e4 2565 */
d1adc1bb
TH
2566int ata_busy_sleep(struct ata_port *ap,
2567 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2568{
2569 unsigned long timer_start, timeout;
2570 u8 status;
2571
2572 status = ata_busy_wait(ap, ATA_BUSY, 300);
2573 timer_start = jiffies;
2574 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2575 while (status != 0xff && (status & ATA_BUSY) &&
2576 time_before(jiffies, timeout)) {
1da177e4
LT
2577 msleep(50);
2578 status = ata_busy_wait(ap, ATA_BUSY, 3);
2579 }
2580
d1adc1bb 2581 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2582 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2583 "port is slow to respond, please be patient "
2584 "(Status 0x%x)\n", status);
1da177e4
LT
2585
2586 timeout = timer_start + tmout;
d1adc1bb
TH
2587 while (status != 0xff && (status & ATA_BUSY) &&
2588 time_before(jiffies, timeout)) {
1da177e4
LT
2589 msleep(50);
2590 status = ata_chk_status(ap);
2591 }
2592
d1adc1bb
TH
2593 if (status == 0xff)
2594 return -ENODEV;
2595
1da177e4 2596 if (status & ATA_BUSY) {
f15a1daf 2597 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2598 "(%lu secs, Status 0x%x)\n",
2599 tmout / HZ, status);
d1adc1bb 2600 return -EBUSY;
1da177e4
LT
2601 }
2602
2603 return 0;
2604}
2605
2606static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2607{
2608 struct ata_ioports *ioaddr = &ap->ioaddr;
2609 unsigned int dev0 = devmask & (1 << 0);
2610 unsigned int dev1 = devmask & (1 << 1);
2611 unsigned long timeout;
2612
2613 /* if device 0 was found in ata_devchk, wait for its
2614 * BSY bit to clear
2615 */
2616 if (dev0)
2617 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2618
2619 /* if device 1 was found in ata_devchk, wait for
2620 * register access, then wait for BSY to clear
2621 */
2622 timeout = jiffies + ATA_TMOUT_BOOT;
2623 while (dev1) {
2624 u8 nsect, lbal;
2625
2626 ap->ops->dev_select(ap, 1);
0d5ff566
TH
2627 nsect = ioread8(ioaddr->nsect_addr);
2628 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
2629 if ((nsect == 1) && (lbal == 1))
2630 break;
2631 if (time_after(jiffies, timeout)) {
2632 dev1 = 0;
2633 break;
2634 }
2635 msleep(50); /* give drive a breather */
2636 }
2637 if (dev1)
2638 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2639
2640 /* is all this really necessary? */
2641 ap->ops->dev_select(ap, 0);
2642 if (dev1)
2643 ap->ops->dev_select(ap, 1);
2644 if (dev0)
2645 ap->ops->dev_select(ap, 0);
2646}
2647
1da177e4
LT
2648static unsigned int ata_bus_softreset(struct ata_port *ap,
2649 unsigned int devmask)
2650{
2651 struct ata_ioports *ioaddr = &ap->ioaddr;
2652
2653 DPRINTK("ata%u: bus reset via SRST\n", ap->id);
2654
2655 /* software reset. causes dev0 to be selected */
0d5ff566
TH
2656 iowrite8(ap->ctl, ioaddr->ctl_addr);
2657 udelay(20); /* FIXME: flush */
2658 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2659 udelay(20); /* FIXME: flush */
2660 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2661
2662 /* spec mandates ">= 2ms" before checking status.
2663 * We wait 150ms, because that was the magic delay used for
2664 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2665 * between when the ATA command register is written, and then
2666 * status is checked. Because waiting for "a while" before
2667 * checking status is fine, post SRST, we perform this magic
2668 * delay here as well.
09c7ad79
AC
2669 *
2670 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
2671 */
2672 msleep(150);
2673
2e9edbf8 2674 /* Before we perform post reset processing we want to see if
298a41ca
TH
2675 * the bus shows 0xFF because the odd clown forgets the D7
2676 * pulldown resistor.
2677 */
d1adc1bb
TH
2678 if (ata_check_status(ap) == 0xFF)
2679 return 0;
09c7ad79 2680
1da177e4
LT
2681 ata_bus_post_reset(ap, devmask);
2682
2683 return 0;
2684}
2685
2686/**
2687 * ata_bus_reset - reset host port and associated ATA channel
2688 * @ap: port to reset
2689 *
2690 * This is typically the first time we actually start issuing
2691 * commands to the ATA channel. We wait for BSY to clear, then
2692 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2693 * result. Determine what devices, if any, are on the channel
2694 * by looking at the device 0/1 error register. Look at the signature
2695 * stored in each device's taskfile registers, to determine if
2696 * the device is ATA or ATAPI.
2697 *
2698 * LOCKING:
0cba632b 2699 * PCI/etc. bus probe sem.
cca3974e 2700 * Obtains host lock.
1da177e4
LT
2701 *
2702 * SIDE EFFECTS:
198e0fed 2703 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2704 */
2705
2706void ata_bus_reset(struct ata_port *ap)
2707{
2708 struct ata_ioports *ioaddr = &ap->ioaddr;
2709 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2710 u8 err;
aec5c3c1 2711 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4
LT
2712
2713 DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);
2714
2715 /* determine if device 0/1 are present */
2716 if (ap->flags & ATA_FLAG_SATA_RESET)
2717 dev0 = 1;
2718 else {
2719 dev0 = ata_devchk(ap, 0);
2720 if (slave_possible)
2721 dev1 = ata_devchk(ap, 1);
2722 }
2723
2724 if (dev0)
2725 devmask |= (1 << 0);
2726 if (dev1)
2727 devmask |= (1 << 1);
2728
2729 /* select device 0 again */
2730 ap->ops->dev_select(ap, 0);
2731
2732 /* issue bus reset */
2733 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
2734 if (ata_bus_softreset(ap, devmask))
2735 goto err_out;
1da177e4
LT
2736
2737 /*
2738 * determine by signature whether we have ATA or ATAPI devices
2739 */
b4dc7623 2740 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 2741 if ((slave_possible) && (err != 0x81))
b4dc7623 2742 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
2743
2744 /* re-enable interrupts */
83625006 2745 ap->ops->irq_on(ap);
1da177e4
LT
2746
2747 /* is double-select really necessary? */
2748 if (ap->device[1].class != ATA_DEV_NONE)
2749 ap->ops->dev_select(ap, 1);
2750 if (ap->device[0].class != ATA_DEV_NONE)
2751 ap->ops->dev_select(ap, 0);
2752
2753 /* if no devices were detected, disable this port */
2754 if ((ap->device[0].class == ATA_DEV_NONE) &&
2755 (ap->device[1].class == ATA_DEV_NONE))
2756 goto err_out;
2757
2758 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2759 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 2760 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2761 }
2762
2763 DPRINTK("EXIT\n");
2764 return;
2765
2766err_out:
f15a1daf 2767 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
2768 ap->ops->port_disable(ap);
2769
2770 DPRINTK("EXIT\n");
2771}
2772
d7bb4cc7
TH
2773/**
2774 * sata_phy_debounce - debounce SATA phy status
2775 * @ap: ATA port to debounce SATA phy status for
2776 * @params: timing parameters { interval, duratinon, timeout } in msec
2777 *
2778 * Make sure SStatus of @ap reaches stable state, determined by
2779 * holding the same value where DET is not 1 for @duration polled
2780 * every @interval, before @timeout. Timeout constraints the
2781 * beginning of the stable state. Because, after hot unplugging,
2782 * DET gets stuck at 1 on some controllers, this functions waits
2783 * until timeout then returns 0 if DET is stable at 1.
2784 *
2785 * LOCKING:
2786 * Kernel thread context (may sleep)
2787 *
2788 * RETURNS:
2789 * 0 on success, -errno on failure.
2790 */
2791int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2792{
d7bb4cc7
TH
2793 unsigned long interval_msec = params[0];
2794 unsigned long duration = params[1] * HZ / 1000;
2795 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2796 unsigned long last_jiffies;
2797 u32 last, cur;
2798 int rc;
2799
2800 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2801 return rc;
2802 cur &= 0xf;
2803
2804 last = cur;
2805 last_jiffies = jiffies;
2806
2807 while (1) {
2808 msleep(interval_msec);
2809 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2810 return rc;
2811 cur &= 0xf;
2812
2813 /* DET stable? */
2814 if (cur == last) {
2815 if (cur == 1 && time_before(jiffies, timeout))
2816 continue;
2817 if (time_after(jiffies, last_jiffies + duration))
2818 return 0;
2819 continue;
2820 }
2821
2822 /* unstable, start over */
2823 last = cur;
2824 last_jiffies = jiffies;
2825
2826 /* check timeout */
2827 if (time_after(jiffies, timeout))
2828 return -EBUSY;
2829 }
2830}
2831
2832/**
2833 * sata_phy_resume - resume SATA phy
2834 * @ap: ATA port to resume SATA phy for
2835 * @params: timing parameters { interval, duratinon, timeout } in msec
2836 *
2837 * Resume SATA phy of @ap and debounce it.
2838 *
2839 * LOCKING:
2840 * Kernel thread context (may sleep)
2841 *
2842 * RETURNS:
2843 * 0 on success, -errno on failure.
2844 */
2845int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2846{
2847 u32 scontrol;
81952c54
TH
2848 int rc;
2849
2850 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2851 return rc;
7a7921e8 2852
852ee16a 2853 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2854
2855 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2856 return rc;
7a7921e8 2857
d7bb4cc7
TH
2858 /* Some PHYs react badly if SStatus is pounded immediately
2859 * after resuming. Delay 200ms before debouncing.
2860 */
2861 msleep(200);
7a7921e8 2862
d7bb4cc7 2863 return sata_phy_debounce(ap, params);
7a7921e8
TH
2864}
2865
f5914a46
TH
2866static void ata_wait_spinup(struct ata_port *ap)
2867{
2868 struct ata_eh_context *ehc = &ap->eh_context;
2869 unsigned long end, secs;
2870 int rc;
2871
2872 /* first, debounce phy if SATA */
2873 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 2874 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
2875
2876 /* if debounced successfully and offline, no need to wait */
2877 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2878 return;
2879 }
2880
2881 /* okay, let's give the drive time to spin up */
2882 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2883 secs = ((end - jiffies) + HZ - 1) / HZ;
2884
2885 if (time_after(jiffies, end))
2886 return;
2887
2888 if (secs > 5)
2889 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2890 "(%lu secs)\n", secs);
2891
2892 schedule_timeout_uninterruptible(end - jiffies);
2893}
2894
2895/**
2896 * ata_std_prereset - prepare for reset
2897 * @ap: ATA port to be reset
2898 *
2899 * @ap is about to be reset. Initialize it.
2900 *
2901 * LOCKING:
2902 * Kernel thread context (may sleep)
2903 *
2904 * RETURNS:
2905 * 0 on success, -errno otherwise.
2906 */
2907int ata_std_prereset(struct ata_port *ap)
2908{
2909 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 2910 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
2911 int rc;
2912
28324304
TH
2913 /* handle link resume & hotplug spinup */
2914 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2915 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2916 ehc->i.action |= ATA_EH_HARDRESET;
2917
2918 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2919 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2920 ata_wait_spinup(ap);
f5914a46
TH
2921
2922 /* if we're about to do hardreset, nothing more to do */
2923 if (ehc->i.action & ATA_EH_HARDRESET)
2924 return 0;
2925
2926 /* if SATA, resume phy */
2927 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
2928 rc = sata_phy_resume(ap, timing);
2929 if (rc && rc != -EOPNOTSUPP) {
2930 /* phy resume failed */
2931 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2932 "link for reset (errno=%d)\n", rc);
2933 return rc;
2934 }
2935 }
2936
2937 /* Wait for !BSY if the controller can wait for the first D2H
2938 * Reg FIS and we don't know that no device is attached.
2939 */
2940 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2941 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2942
2943 return 0;
2944}
2945
c2bd5804
TH
2946/**
2947 * ata_std_softreset - reset host port via ATA SRST
2948 * @ap: port to reset
c2bd5804
TH
2949 * @classes: resulting classes of attached devices
2950 *
52783c5d 2951 * Reset host port using ATA SRST.
c2bd5804
TH
2952 *
2953 * LOCKING:
2954 * Kernel thread context (may sleep)
2955 *
2956 * RETURNS:
2957 * 0 on success, -errno otherwise.
2958 */
2bf2cb26 2959int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
2960{
2961 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2962 unsigned int devmask = 0, err_mask;
2963 u8 err;
2964
2965 DPRINTK("ENTER\n");
2966
81952c54 2967 if (ata_port_offline(ap)) {
3a39746a
TH
2968 classes[0] = ATA_DEV_NONE;
2969 goto out;
2970 }
2971
c2bd5804
TH
2972 /* determine if device 0/1 are present */
2973 if (ata_devchk(ap, 0))
2974 devmask |= (1 << 0);
2975 if (slave_possible && ata_devchk(ap, 1))
2976 devmask |= (1 << 1);
2977
c2bd5804
TH
2978 /* select device 0 again */
2979 ap->ops->dev_select(ap, 0);
2980
2981 /* issue bus reset */
2982 DPRINTK("about to softreset, devmask=%x\n", devmask);
2983 err_mask = ata_bus_softreset(ap, devmask);
2984 if (err_mask) {
f15a1daf
TH
2985 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
2986 err_mask);
c2bd5804
TH
2987 return -EIO;
2988 }
2989
2990 /* determine by signature whether we have ATA or ATAPI devices */
2991 classes[0] = ata_dev_try_classify(ap, 0, &err);
2992 if (slave_possible && err != 0x81)
2993 classes[1] = ata_dev_try_classify(ap, 1, &err);
2994
3a39746a 2995 out:
c2bd5804
TH
2996 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
2997 return 0;
2998}
2999
3000/**
b6103f6d 3001 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3002 * @ap: port to reset
b6103f6d 3003 * @timing: timing parameters { interval, duratinon, timeout } in msec
c2bd5804
TH
3004 *
3005 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3006 *
3007 * LOCKING:
3008 * Kernel thread context (may sleep)
3009 *
3010 * RETURNS:
3011 * 0 on success, -errno otherwise.
3012 */
b6103f6d 3013int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 3014{
852ee16a 3015 u32 scontrol;
81952c54 3016 int rc;
852ee16a 3017
c2bd5804
TH
3018 DPRINTK("ENTER\n");
3019
3c567b7d 3020 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3021 /* SATA spec says nothing about how to reconfigure
3022 * spd. To be on the safe side, turn off phy during
3023 * reconfiguration. This works for at least ICH7 AHCI
3024 * and Sil3124.
3025 */
81952c54 3026 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3027 goto out;
81952c54 3028
a34b6fc0 3029 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3030
3031 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3032 goto out;
1c3fae4d 3033
3c567b7d 3034 sata_set_spd(ap);
1c3fae4d
TH
3035 }
3036
3037 /* issue phy wake/reset */
81952c54 3038 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3039 goto out;
81952c54 3040
852ee16a 3041 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3042
3043 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3044 goto out;
c2bd5804 3045
1c3fae4d 3046 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3047 * 10.4.2 says at least 1 ms.
3048 */
3049 msleep(1);
3050
1c3fae4d 3051 /* bring phy back */
b6103f6d
TH
3052 rc = sata_phy_resume(ap, timing);
3053 out:
3054 DPRINTK("EXIT, rc=%d\n", rc);
3055 return rc;
3056}
3057
3058/**
3059 * sata_std_hardreset - reset host port via SATA phy reset
3060 * @ap: port to reset
3061 * @class: resulting class of attached device
3062 *
3063 * SATA phy-reset host port using DET bits of SControl register,
3064 * wait for !BSY and classify the attached device.
3065 *
3066 * LOCKING:
3067 * Kernel thread context (may sleep)
3068 *
3069 * RETURNS:
3070 * 0 on success, -errno otherwise.
3071 */
3072int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3073{
3074 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3075 int rc;
3076
3077 DPRINTK("ENTER\n");
3078
3079 /* do hardreset */
3080 rc = sata_port_hardreset(ap, timing);
3081 if (rc) {
3082 ata_port_printk(ap, KERN_ERR,
3083 "COMRESET failed (errno=%d)\n", rc);
3084 return rc;
3085 }
c2bd5804 3086
c2bd5804 3087 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3088 if (ata_port_offline(ap)) {
c2bd5804
TH
3089 *class = ATA_DEV_NONE;
3090 DPRINTK("EXIT, link offline\n");
3091 return 0;
3092 }
3093
34fee227
TH
3094 /* wait a while before checking status, see SRST for more info */
3095 msleep(150);
3096
c2bd5804 3097 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3098 ata_port_printk(ap, KERN_ERR,
3099 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3100 return -EIO;
3101 }
3102
3a39746a
TH
3103 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3104
c2bd5804
TH
3105 *class = ata_dev_try_classify(ap, 0, NULL);
3106
3107 DPRINTK("EXIT, class=%u\n", *class);
3108 return 0;
3109}
3110
3111/**
3112 * ata_std_postreset - standard postreset callback
3113 * @ap: the target ata_port
3114 * @classes: classes of attached devices
3115 *
3116 * This function is invoked after a successful reset. Note that
3117 * the device might have been reset more than once using
3118 * different reset methods before postreset is invoked.
c2bd5804 3119 *
c2bd5804
TH
3120 * LOCKING:
3121 * Kernel thread context (may sleep)
3122 */
3123void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3124{
dc2b3515
TH
3125 u32 serror;
3126
c2bd5804
TH
3127 DPRINTK("ENTER\n");
3128
c2bd5804 3129 /* print link status */
81952c54 3130 sata_print_link_status(ap);
c2bd5804 3131
dc2b3515
TH
3132 /* clear SError */
3133 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3134 sata_scr_write(ap, SCR_ERROR, serror);
3135
3a39746a 3136 /* re-enable interrupts */
83625006
AI
3137 if (!ap->ops->error_handler)
3138 ap->ops->irq_on(ap);
c2bd5804
TH
3139
3140 /* is double-select really necessary? */
3141 if (classes[0] != ATA_DEV_NONE)
3142 ap->ops->dev_select(ap, 1);
3143 if (classes[1] != ATA_DEV_NONE)
3144 ap->ops->dev_select(ap, 0);
3145
3a39746a
TH
3146 /* bail out if no device is present */
3147 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3148 DPRINTK("EXIT, no device\n");
3149 return;
3150 }
3151
3152 /* set up device control */
0d5ff566
TH
3153 if (ap->ioaddr.ctl_addr)
3154 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3155
3156 DPRINTK("EXIT\n");
3157}
3158
623a3128
TH
3159/**
3160 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3161 * @dev: device to compare against
3162 * @new_class: class of the new device
3163 * @new_id: IDENTIFY page of the new device
3164 *
3165 * Compare @new_class and @new_id against @dev and determine
3166 * whether @dev is the device indicated by @new_class and
3167 * @new_id.
3168 *
3169 * LOCKING:
3170 * None.
3171 *
3172 * RETURNS:
3173 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3174 */
3373efd8
TH
3175static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3176 const u16 *new_id)
623a3128
TH
3177{
3178 const u16 *old_id = dev->id;
a0cf733b
TH
3179 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3180 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3181 u64 new_n_sectors;
3182
3183 if (dev->class != new_class) {
f15a1daf
TH
3184 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3185 dev->class, new_class);
623a3128
TH
3186 return 0;
3187 }
3188
a0cf733b
TH
3189 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3190 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3191 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3192 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3193 new_n_sectors = ata_id_n_sectors(new_id);
3194
3195 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3196 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3197 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3198 return 0;
3199 }
3200
3201 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3202 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3203 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3204 return 0;
3205 }
3206
3207 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3208 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3209 "%llu != %llu\n",
3210 (unsigned long long)dev->n_sectors,
3211 (unsigned long long)new_n_sectors);
623a3128
TH
3212 return 0;
3213 }
3214
3215 return 1;
3216}
3217
3218/**
3219 * ata_dev_revalidate - Revalidate ATA device
623a3128 3220 * @dev: device to revalidate
bff04647 3221 * @readid_flags: read ID flags
623a3128
TH
3222 *
3223 * Re-read IDENTIFY page and make sure @dev is still attached to
3224 * the port.
3225 *
3226 * LOCKING:
3227 * Kernel thread context (may sleep)
3228 *
3229 * RETURNS:
3230 * 0 on success, negative errno otherwise
3231 */
bff04647 3232int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3233{
5eb45c02 3234 unsigned int class = dev->class;
f15a1daf 3235 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3236 int rc;
3237
5eb45c02
TH
3238 if (!ata_dev_enabled(dev)) {
3239 rc = -ENODEV;
3240 goto fail;
3241 }
623a3128 3242
fe635c7e 3243 /* read ID data */
bff04647 3244 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3245 if (rc)
3246 goto fail;
3247
3248 /* is the device still there? */
3373efd8 3249 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3250 rc = -ENODEV;
3251 goto fail;
3252 }
3253
fe635c7e 3254 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3255
3256 /* configure device according to the new ID */
efdaedc4 3257 rc = ata_dev_configure(dev);
5eb45c02
TH
3258 if (rc == 0)
3259 return 0;
623a3128
TH
3260
3261 fail:
f15a1daf 3262 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3263 return rc;
3264}
3265
6919a0a6
AC
3266struct ata_blacklist_entry {
3267 const char *model_num;
3268 const char *model_rev;
3269 unsigned long horkage;
3270};
3271
3272static const struct ata_blacklist_entry ata_device_blacklist [] = {
3273 /* Devices with DMA related problems under Linux */
3274 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3275 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3276 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3277 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3278 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3279 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3280 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3281 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3282 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3283 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3284 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3285 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3286 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3287 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3288 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3289 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3290 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3291 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3292 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3293 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3294 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3295 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3296 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3297 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3298 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3299 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3300 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3301 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3302 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3303
3304 /* Devices we expect to fail diagnostics */
3305
3306 /* Devices where NCQ should be avoided */
3307 /* NCQ is slow */
3308 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3309
3310 /* Devices with NCQ limits */
3311
3312 /* End Marker */
3313 { }
1da177e4 3314};
2e9edbf8 3315
6919a0a6 3316unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3317{
8bfa79fc
TH
3318 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3319 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3320 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3321
8bfa79fc
TH
3322 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3323 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3324
6919a0a6 3325 while (ad->model_num) {
8bfa79fc 3326 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3327 if (ad->model_rev == NULL)
3328 return ad->horkage;
8bfa79fc 3329 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3330 return ad->horkage;
f4b15fef 3331 }
6919a0a6 3332 ad++;
f4b15fef 3333 }
1da177e4
LT
3334 return 0;
3335}
3336
6919a0a6
AC
3337static int ata_dma_blacklisted(const struct ata_device *dev)
3338{
3339 /* We don't support polling DMA.
3340 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3341 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3342 */
3343 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3344 (dev->flags & ATA_DFLAG_CDB_INTR))
3345 return 1;
3346 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3347}
3348
a6d5a51c
TH
3349/**
3350 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3351 * @dev: Device to compute xfermask for
3352 *
acf356b1
TH
3353 * Compute supported xfermask of @dev and store it in
3354 * dev->*_mask. This function is responsible for applying all
3355 * known limits including host controller limits, device
3356 * blacklist, etc...
a6d5a51c
TH
3357 *
3358 * LOCKING:
3359 * None.
a6d5a51c 3360 */
3373efd8 3361static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3362{
3373efd8 3363 struct ata_port *ap = dev->ap;
cca3974e 3364 struct ata_host *host = ap->host;
a6d5a51c 3365 unsigned long xfer_mask;
1da177e4 3366
37deecb5 3367 /* controller modes available */
565083e1
TH
3368 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3369 ap->mwdma_mask, ap->udma_mask);
3370
3371 /* Apply cable rule here. Don't apply it early because when
3372 * we handle hot plug the cable type can itself change.
3373 */
3374 if (ap->cbl == ATA_CBL_PATA40)
3375 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
fc085150
AC
3376 /* Apply drive side cable rule. Unknown or 80 pin cables reported
3377 * host side are checked drive side as well. Cases where we know a
3378 * 40wire cable is used safely for 80 are not checked here.
3379 */
3380 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3381 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3382
1da177e4 3383
37deecb5
TH
3384 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3385 dev->mwdma_mask, dev->udma_mask);
3386 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3387
b352e57d
AC
3388 /*
3389 * CFA Advanced TrueIDE timings are not allowed on a shared
3390 * cable
3391 */
3392 if (ata_dev_pair(dev)) {
3393 /* No PIO5 or PIO6 */
3394 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3395 /* No MWDMA3 or MWDMA 4 */
3396 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3397 }
3398
37deecb5
TH
3399 if (ata_dma_blacklisted(dev)) {
3400 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3401 ata_dev_printk(dev, KERN_WARNING,
3402 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3403 }
a6d5a51c 3404
cca3974e 3405 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
37deecb5
TH
3406 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3407 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3408 "other device, disabling DMA\n");
5444a6f4 3409 }
565083e1 3410
5444a6f4
AC
3411 if (ap->ops->mode_filter)
3412 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3413
565083e1
TH
3414 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3415 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3416}
3417
1da177e4
LT
3418/**
3419 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3420 * @dev: Device to which command will be sent
3421 *
780a87f7
JG
3422 * Issue SET FEATURES - XFER MODE command to device @dev
3423 * on port @ap.
3424 *
1da177e4 3425 * LOCKING:
0cba632b 3426 * PCI/etc. bus probe sem.
83206a29
TH
3427 *
3428 * RETURNS:
3429 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3430 */
3431
3373efd8 3432static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3433{
a0123703 3434 struct ata_taskfile tf;
83206a29 3435 unsigned int err_mask;
1da177e4
LT
3436
3437 /* set up set-features taskfile */
3438 DPRINTK("set features - xfer mode\n");
3439
3373efd8 3440 ata_tf_init(dev, &tf);
a0123703
TH
3441 tf.command = ATA_CMD_SET_FEATURES;
3442 tf.feature = SETFEATURES_XFER;
3443 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3444 tf.protocol = ATA_PROT_NODATA;
3445 tf.nsect = dev->xfer_mode;
1da177e4 3446
3373efd8 3447 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3448
83206a29
TH
3449 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3450 return err_mask;
1da177e4
LT
3451}
3452
8bf62ece
AL
3453/**
3454 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3455 * @dev: Device to which command will be sent
e2a7f77a
RD
3456 * @heads: Number of heads (taskfile parameter)
3457 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3458 *
3459 * LOCKING:
6aff8f1f
TH
3460 * Kernel thread context (may sleep)
3461 *
3462 * RETURNS:
3463 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3464 */
3373efd8
TH
3465static unsigned int ata_dev_init_params(struct ata_device *dev,
3466 u16 heads, u16 sectors)
8bf62ece 3467{
a0123703 3468 struct ata_taskfile tf;
6aff8f1f 3469 unsigned int err_mask;
8bf62ece
AL
3470
3471 /* Number of sectors per track 1-255. Number of heads 1-16 */
3472 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3473 return AC_ERR_INVALID;
8bf62ece
AL
3474
3475 /* set up init dev params taskfile */
3476 DPRINTK("init dev params \n");
3477
3373efd8 3478 ata_tf_init(dev, &tf);
a0123703
TH
3479 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3480 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3481 tf.protocol = ATA_PROT_NODATA;
3482 tf.nsect = sectors;
3483 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3484
3373efd8 3485 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3486
6aff8f1f
TH
3487 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3488 return err_mask;
8bf62ece
AL
3489}
3490
1da177e4 3491/**
0cba632b
JG
3492 * ata_sg_clean - Unmap DMA memory associated with command
3493 * @qc: Command containing DMA memory to be released
3494 *
3495 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3496 *
3497 * LOCKING:
cca3974e 3498 * spin_lock_irqsave(host lock)
1da177e4 3499 */
70e6ad0c 3500void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3501{
3502 struct ata_port *ap = qc->ap;
cedc9a47 3503 struct scatterlist *sg = qc->__sg;
1da177e4 3504 int dir = qc->dma_dir;
cedc9a47 3505 void *pad_buf = NULL;
1da177e4 3506
a4631474
TH
3507 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3508 WARN_ON(sg == NULL);
1da177e4
LT
3509
3510 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3511 WARN_ON(qc->n_elem > 1);
1da177e4 3512
2c13b7ce 3513 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3514
cedc9a47
JG
3515 /* if we padded the buffer out to 32-bit bound, and data
3516 * xfer direction is from-device, we must copy from the
3517 * pad buffer back into the supplied buffer
3518 */
3519 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3520 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3521
3522 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3523 if (qc->n_elem)
2f1f610b 3524 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3525 /* restore last sg */
3526 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3527 if (pad_buf) {
3528 struct scatterlist *psg = &qc->pad_sgent;
3529 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3530 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3531 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3532 }
3533 } else {
2e242fa9 3534 if (qc->n_elem)
2f1f610b 3535 dma_unmap_single(ap->dev,
e1410f2d
JG
3536 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3537 dir);
cedc9a47
JG
3538 /* restore sg */
3539 sg->length += qc->pad_len;
3540 if (pad_buf)
3541 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3542 pad_buf, qc->pad_len);
3543 }
1da177e4
LT
3544
3545 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3546 qc->__sg = NULL;
1da177e4
LT
3547}
3548
3549/**
3550 * ata_fill_sg - Fill PCI IDE PRD table
3551 * @qc: Metadata associated with taskfile to be transferred
3552 *
780a87f7
JG
3553 * Fill PCI IDE PRD (scatter-gather) table with segments
3554 * associated with the current disk command.
3555 *
1da177e4 3556 * LOCKING:
cca3974e 3557 * spin_lock_irqsave(host lock)
1da177e4
LT
3558 *
3559 */
3560static void ata_fill_sg(struct ata_queued_cmd *qc)
3561{
1da177e4 3562 struct ata_port *ap = qc->ap;
cedc9a47
JG
3563 struct scatterlist *sg;
3564 unsigned int idx;
1da177e4 3565
a4631474 3566 WARN_ON(qc->__sg == NULL);
f131883e 3567 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
3568
3569 idx = 0;
cedc9a47 3570 ata_for_each_sg(sg, qc) {
1da177e4
LT
3571 u32 addr, offset;
3572 u32 sg_len, len;
3573
3574 /* determine if physical DMA addr spans 64K boundary.
3575 * Note h/w doesn't support 64-bit, so we unconditionally
3576 * truncate dma_addr_t to u32.
3577 */
3578 addr = (u32) sg_dma_address(sg);
3579 sg_len = sg_dma_len(sg);
3580
3581 while (sg_len) {
3582 offset = addr & 0xffff;
3583 len = sg_len;
3584 if ((offset + sg_len) > 0x10000)
3585 len = 0x10000 - offset;
3586
3587 ap->prd[idx].addr = cpu_to_le32(addr);
3588 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
3589 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
3590
3591 idx++;
3592 sg_len -= len;
3593 addr += len;
3594 }
3595 }
3596
3597 if (idx)
3598 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
3599}
3600/**
3601 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
3602 * @qc: Metadata associated with taskfile to check
3603 *
780a87f7
JG
3604 * Allow low-level driver to filter ATA PACKET commands, returning
3605 * a status indicating whether or not it is OK to use DMA for the
3606 * supplied PACKET command.
3607 *
1da177e4 3608 * LOCKING:
cca3974e 3609 * spin_lock_irqsave(host lock)
0cba632b 3610 *
1da177e4
LT
3611 * RETURNS: 0 when ATAPI DMA can be used
3612 * nonzero otherwise
3613 */
3614int ata_check_atapi_dma(struct ata_queued_cmd *qc)
3615{
3616 struct ata_port *ap = qc->ap;
3617 int rc = 0; /* Assume ATAPI DMA is OK by default */
3618
3619 if (ap->ops->check_atapi_dma)
3620 rc = ap->ops->check_atapi_dma(qc);
3621
3622 return rc;
3623}
3624/**
3625 * ata_qc_prep - Prepare taskfile for submission
3626 * @qc: Metadata associated with taskfile to be prepared
3627 *
780a87f7
JG
3628 * Prepare ATA taskfile for submission.
3629 *
1da177e4 3630 * LOCKING:
cca3974e 3631 * spin_lock_irqsave(host lock)
1da177e4
LT
3632 */
3633void ata_qc_prep(struct ata_queued_cmd *qc)
3634{
3635 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
3636 return;
3637
3638 ata_fill_sg(qc);
3639}
3640
e46834cd
BK
3641void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
3642
0cba632b
JG
3643/**
3644 * ata_sg_init_one - Associate command with memory buffer
3645 * @qc: Command to be associated
3646 * @buf: Memory buffer
3647 * @buflen: Length of memory buffer, in bytes.
3648 *
3649 * Initialize the data-related elements of queued_cmd @qc
3650 * to point to a single memory buffer, @buf of byte length @buflen.
3651 *
3652 * LOCKING:
cca3974e 3653 * spin_lock_irqsave(host lock)
0cba632b
JG
3654 */
3655
1da177e4
LT
3656void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
3657{
1da177e4
LT
3658 qc->flags |= ATA_QCFLAG_SINGLE;
3659
cedc9a47 3660 qc->__sg = &qc->sgent;
1da177e4 3661 qc->n_elem = 1;
cedc9a47 3662 qc->orig_n_elem = 1;
1da177e4 3663 qc->buf_virt = buf;
233277ca 3664 qc->nbytes = buflen;
1da177e4 3665
61c0596c 3666 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
3667}
3668
0cba632b
JG
3669/**
3670 * ata_sg_init - Associate command with scatter-gather table.
3671 * @qc: Command to be associated
3672 * @sg: Scatter-gather table.
3673 * @n_elem: Number of elements in s/g table.
3674 *
3675 * Initialize the data-related elements of queued_cmd @qc
3676 * to point to a scatter-gather table @sg, containing @n_elem
3677 * elements.
3678 *
3679 * LOCKING:
cca3974e 3680 * spin_lock_irqsave(host lock)
0cba632b
JG
3681 */
3682
1da177e4
LT
3683void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
3684 unsigned int n_elem)
3685{
3686 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 3687 qc->__sg = sg;
1da177e4 3688 qc->n_elem = n_elem;
cedc9a47 3689 qc->orig_n_elem = n_elem;
1da177e4
LT
3690}
3691
3692/**
0cba632b
JG
3693 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
3694 * @qc: Command with memory buffer to be mapped.
3695 *
3696 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
3697 *
3698 * LOCKING:
cca3974e 3699 * spin_lock_irqsave(host lock)
1da177e4
LT
3700 *
3701 * RETURNS:
0cba632b 3702 * Zero on success, negative on error.
1da177e4
LT
3703 */
3704
3705static int ata_sg_setup_one(struct ata_queued_cmd *qc)
3706{
3707 struct ata_port *ap = qc->ap;
3708 int dir = qc->dma_dir;
cedc9a47 3709 struct scatterlist *sg = qc->__sg;
1da177e4 3710 dma_addr_t dma_address;
2e242fa9 3711 int trim_sg = 0;
1da177e4 3712
cedc9a47
JG
3713 /* we must lengthen transfers to end on a 32-bit boundary */
3714 qc->pad_len = sg->length & 3;
3715 if (qc->pad_len) {
3716 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3717 struct scatterlist *psg = &qc->pad_sgent;
3718
a4631474 3719 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3720
3721 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3722
3723 if (qc->tf.flags & ATA_TFLAG_WRITE)
3724 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
3725 qc->pad_len);
3726
3727 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3728 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3729 /* trim sg */
3730 sg->length -= qc->pad_len;
2e242fa9
TH
3731 if (sg->length == 0)
3732 trim_sg = 1;
cedc9a47
JG
3733
3734 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
3735 sg->length, qc->pad_len);
3736 }
3737
2e242fa9
TH
3738 if (trim_sg) {
3739 qc->n_elem--;
e1410f2d
JG
3740 goto skip_map;
3741 }
3742
2f1f610b 3743 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 3744 sg->length, dir);
537a95d9
TH
3745 if (dma_mapping_error(dma_address)) {
3746 /* restore sg */
3747 sg->length += qc->pad_len;
1da177e4 3748 return -1;
537a95d9 3749 }
1da177e4
LT
3750
3751 sg_dma_address(sg) = dma_address;
32529e01 3752 sg_dma_len(sg) = sg->length;
1da177e4 3753
2e242fa9 3754skip_map:
1da177e4
LT
3755 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
3756 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3757
3758 return 0;
3759}
3760
3761/**
0cba632b
JG
3762 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
3763 * @qc: Command with scatter-gather table to be mapped.
3764 *
3765 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
3766 *
3767 * LOCKING:
cca3974e 3768 * spin_lock_irqsave(host lock)
1da177e4
LT
3769 *
3770 * RETURNS:
0cba632b 3771 * Zero on success, negative on error.
1da177e4
LT
3772 *
3773 */
3774
3775static int ata_sg_setup(struct ata_queued_cmd *qc)
3776{
3777 struct ata_port *ap = qc->ap;
cedc9a47
JG
3778 struct scatterlist *sg = qc->__sg;
3779 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 3780 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4
LT
3781
3782 VPRINTK("ENTER, ata%u\n", ap->id);
a4631474 3783 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 3784
cedc9a47
JG
3785 /* we must lengthen transfers to end on a 32-bit boundary */
3786 qc->pad_len = lsg->length & 3;
3787 if (qc->pad_len) {
3788 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3789 struct scatterlist *psg = &qc->pad_sgent;
3790 unsigned int offset;
3791
a4631474 3792 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
3793
3794 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
3795
3796 /*
3797 * psg->page/offset are used to copy to-be-written
3798 * data in this function or read data in ata_sg_clean.
3799 */
3800 offset = lsg->offset + lsg->length - qc->pad_len;
3801 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
3802 psg->offset = offset_in_page(offset);
3803
3804 if (qc->tf.flags & ATA_TFLAG_WRITE) {
3805 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3806 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 3807 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3808 }
3809
3810 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
3811 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
3812 /* trim last sg */
3813 lsg->length -= qc->pad_len;
e1410f2d
JG
3814 if (lsg->length == 0)
3815 trim_sg = 1;
cedc9a47
JG
3816
3817 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
3818 qc->n_elem - 1, lsg->length, qc->pad_len);
3819 }
3820
e1410f2d
JG
3821 pre_n_elem = qc->n_elem;
3822 if (trim_sg && pre_n_elem)
3823 pre_n_elem--;
3824
3825 if (!pre_n_elem) {
3826 n_elem = 0;
3827 goto skip_map;
3828 }
3829
1da177e4 3830 dir = qc->dma_dir;
2f1f610b 3831 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
3832 if (n_elem < 1) {
3833 /* restore last sg */
3834 lsg->length += qc->pad_len;
1da177e4 3835 return -1;
537a95d9 3836 }
1da177e4
LT
3837
3838 DPRINTK("%d sg elements mapped\n", n_elem);
3839
e1410f2d 3840skip_map:
1da177e4
LT
3841 qc->n_elem = n_elem;
3842
3843 return 0;
3844}
3845
0baab86b 3846/**
c893a3ae 3847 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
3848 * @buf: Buffer to swap
3849 * @buf_words: Number of 16-bit words in buffer.
3850 *
3851 * Swap halves of 16-bit words if needed to convert from
3852 * little-endian byte order to native cpu byte order, or
3853 * vice-versa.
3854 *
3855 * LOCKING:
6f0ef4fa 3856 * Inherited from caller.
0baab86b 3857 */
1da177e4
LT
3858void swap_buf_le16(u16 *buf, unsigned int buf_words)
3859{
3860#ifdef __BIG_ENDIAN
3861 unsigned int i;
3862
3863 for (i = 0; i < buf_words; i++)
3864 buf[i] = le16_to_cpu(buf[i]);
3865#endif /* __BIG_ENDIAN */
3866}
3867
6ae4cfb5 3868/**
0d5ff566 3869 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 3870 * @adev: device to target
6ae4cfb5
AL
3871 * @buf: data buffer
3872 * @buflen: buffer length
344babaa 3873 * @write_data: read/write
6ae4cfb5
AL
3874 *
3875 * Transfer data from/to the device data register by PIO.
3876 *
3877 * LOCKING:
3878 * Inherited from caller.
6ae4cfb5 3879 */
0d5ff566
TH
3880void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3881 unsigned int buflen, int write_data)
1da177e4 3882{
a6b2c5d4 3883 struct ata_port *ap = adev->ap;
6ae4cfb5 3884 unsigned int words = buflen >> 1;
1da177e4 3885
6ae4cfb5 3886 /* Transfer multiple of 2 bytes */
1da177e4 3887 if (write_data)
0d5ff566 3888 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 3889 else
0d5ff566 3890 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
3891
3892 /* Transfer trailing 1 byte, if any. */
3893 if (unlikely(buflen & 0x01)) {
3894 u16 align_buf[1] = { 0 };
3895 unsigned char *trailing_buf = buf + buflen - 1;
3896
3897 if (write_data) {
3898 memcpy(align_buf, trailing_buf, 1);
0d5ff566 3899 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 3900 } else {
0d5ff566 3901 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
3902 memcpy(trailing_buf, align_buf, 1);
3903 }
3904 }
1da177e4
LT
3905}
3906
75e99585 3907/**
0d5ff566 3908 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
3909 * @adev: device to target
3910 * @buf: data buffer
3911 * @buflen: buffer length
3912 * @write_data: read/write
3913 *
88574551 3914 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
3915 * transfer with interrupts disabled.
3916 *
3917 * LOCKING:
3918 * Inherited from caller.
3919 */
0d5ff566
TH
3920void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3921 unsigned int buflen, int write_data)
75e99585
AC
3922{
3923 unsigned long flags;
3924 local_irq_save(flags);
0d5ff566 3925 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
3926 local_irq_restore(flags);
3927}
3928
3929
6ae4cfb5
AL
3930/**
3931 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
3932 * @qc: Command on going
3933 *
3934 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
3935 *
3936 * LOCKING:
3937 * Inherited from caller.
3938 */
3939
1da177e4
LT
3940static void ata_pio_sector(struct ata_queued_cmd *qc)
3941{
3942 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 3943 struct scatterlist *sg = qc->__sg;
1da177e4
LT
3944 struct ata_port *ap = qc->ap;
3945 struct page *page;
3946 unsigned int offset;
3947 unsigned char *buf;
3948
726f0785 3949 if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
14be71f4 3950 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
3951
3952 page = sg[qc->cursg].page;
726f0785 3953 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
3954
3955 /* get the current page and offset */
3956 page = nth_page(page, (offset >> PAGE_SHIFT));
3957 offset %= PAGE_SIZE;
3958
1da177e4
LT
3959 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
3960
91b8b313
AL
3961 if (PageHighMem(page)) {
3962 unsigned long flags;
3963
a6b2c5d4 3964 /* FIXME: use a bounce buffer */
91b8b313
AL
3965 local_irq_save(flags);
3966 buf = kmap_atomic(page, KM_IRQ0);
083958d3 3967
91b8b313 3968 /* do the actual data transfer */
a6b2c5d4 3969 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
1da177e4 3970
91b8b313
AL
3971 kunmap_atomic(buf, KM_IRQ0);
3972 local_irq_restore(flags);
3973 } else {
3974 buf = page_address(page);
a6b2c5d4 3975 ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
91b8b313 3976 }
1da177e4 3977
726f0785
TH
3978 qc->curbytes += ATA_SECT_SIZE;
3979 qc->cursg_ofs += ATA_SECT_SIZE;
1da177e4 3980
726f0785 3981 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
3982 qc->cursg++;
3983 qc->cursg_ofs = 0;
3984 }
1da177e4 3985}
1da177e4 3986
07f6f7d0
AL
3987/**
3988 * ata_pio_sectors - Transfer one or many 512-byte sectors.
3989 * @qc: Command on going
3990 *
c81e29b4 3991 * Transfer one or many ATA_SECT_SIZE of data from/to the
07f6f7d0
AL
3992 * ATA device for the DRQ request.
3993 *
3994 * LOCKING:
3995 * Inherited from caller.
3996 */
1da177e4 3997
07f6f7d0
AL
3998static void ata_pio_sectors(struct ata_queued_cmd *qc)
3999{
4000 if (is_multi_taskfile(&qc->tf)) {
4001 /* READ/WRITE MULTIPLE */
4002 unsigned int nsect;
4003
587005de 4004 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4005
726f0785
TH
4006 nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
4007 qc->dev->multi_count);
07f6f7d0
AL
4008 while (nsect--)
4009 ata_pio_sector(qc);
4010 } else
4011 ata_pio_sector(qc);
4012}
4013
c71c1857
AL
4014/**
4015 * atapi_send_cdb - Write CDB bytes to hardware
4016 * @ap: Port to which ATAPI device is attached.
4017 * @qc: Taskfile currently active
4018 *
4019 * When device has indicated its readiness to accept
4020 * a CDB, this function is called. Send the CDB.
4021 *
4022 * LOCKING:
4023 * caller.
4024 */
4025
4026static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4027{
4028 /* send SCSI cdb */
4029 DPRINTK("send cdb\n");
db024d53 4030 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4031
a6b2c5d4 4032 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4033 ata_altstatus(ap); /* flush */
4034
4035 switch (qc->tf.protocol) {
4036 case ATA_PROT_ATAPI:
4037 ap->hsm_task_state = HSM_ST;
4038 break;
4039 case ATA_PROT_ATAPI_NODATA:
4040 ap->hsm_task_state = HSM_ST_LAST;
4041 break;
4042 case ATA_PROT_ATAPI_DMA:
4043 ap->hsm_task_state = HSM_ST_LAST;
4044 /* initiate bmdma */
4045 ap->ops->bmdma_start(qc);
4046 break;
4047 }
1da177e4
LT
4048}
4049
6ae4cfb5
AL
4050/**
4051 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4052 * @qc: Command on going
4053 * @bytes: number of bytes
4054 *
4055 * Transfer Transfer data from/to the ATAPI device.
4056 *
4057 * LOCKING:
4058 * Inherited from caller.
4059 *
4060 */
4061
1da177e4
LT
4062static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4063{
4064 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4065 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4066 struct ata_port *ap = qc->ap;
4067 struct page *page;
4068 unsigned char *buf;
4069 unsigned int offset, count;
4070
563a6e1f 4071 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4072 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4073
4074next_sg:
563a6e1f 4075 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4076 /*
563a6e1f
AL
4077 * The end of qc->sg is reached and the device expects
4078 * more data to transfer. In order not to overrun qc->sg
4079 * and fulfill length specified in the byte count register,
4080 * - for read case, discard trailing data from the device
4081 * - for write case, padding zero data to the device
4082 */
4083 u16 pad_buf[1] = { 0 };
4084 unsigned int words = bytes >> 1;
4085 unsigned int i;
4086
4087 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4088 ata_dev_printk(qc->dev, KERN_WARNING,
4089 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4090
4091 for (i = 0; i < words; i++)
a6b2c5d4 4092 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4093
14be71f4 4094 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4095 return;
4096 }
4097
cedc9a47 4098 sg = &qc->__sg[qc->cursg];
1da177e4 4099
1da177e4
LT
4100 page = sg->page;
4101 offset = sg->offset + qc->cursg_ofs;
4102
4103 /* get the current page and offset */
4104 page = nth_page(page, (offset >> PAGE_SHIFT));
4105 offset %= PAGE_SIZE;
4106
6952df03 4107 /* don't overrun current sg */
32529e01 4108 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4109
4110 /* don't cross page boundaries */
4111 count = min(count, (unsigned int)PAGE_SIZE - offset);
4112
7282aa4b
AL
4113 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4114
91b8b313
AL
4115 if (PageHighMem(page)) {
4116 unsigned long flags;
4117
a6b2c5d4 4118 /* FIXME: use bounce buffer */
91b8b313
AL
4119 local_irq_save(flags);
4120 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4121
91b8b313 4122 /* do the actual data transfer */
a6b2c5d4 4123 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4124
91b8b313
AL
4125 kunmap_atomic(buf, KM_IRQ0);
4126 local_irq_restore(flags);
4127 } else {
4128 buf = page_address(page);
a6b2c5d4 4129 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4130 }
1da177e4
LT
4131
4132 bytes -= count;
4133 qc->curbytes += count;
4134 qc->cursg_ofs += count;
4135
32529e01 4136 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4137 qc->cursg++;
4138 qc->cursg_ofs = 0;
4139 }
4140
563a6e1f 4141 if (bytes)
1da177e4 4142 goto next_sg;
1da177e4
LT
4143}
4144
6ae4cfb5
AL
4145/**
4146 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4147 * @qc: Command on going
4148 *
4149 * Transfer Transfer data from/to the ATAPI device.
4150 *
4151 * LOCKING:
4152 * Inherited from caller.
6ae4cfb5
AL
4153 */
4154
1da177e4
LT
4155static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4156{
4157 struct ata_port *ap = qc->ap;
4158 struct ata_device *dev = qc->dev;
4159 unsigned int ireason, bc_lo, bc_hi, bytes;
4160 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4161
eec4c3f3
AL
4162 /* Abuse qc->result_tf for temp storage of intermediate TF
4163 * here to save some kernel stack usage.
4164 * For normal completion, qc->result_tf is not relevant. For
4165 * error, qc->result_tf is later overwritten by ata_qc_complete().
4166 * So, the correctness of qc->result_tf is not affected.
4167 */
4168 ap->ops->tf_read(ap, &qc->result_tf);
4169 ireason = qc->result_tf.nsect;
4170 bc_lo = qc->result_tf.lbam;
4171 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4172 bytes = (bc_hi << 8) | bc_lo;
4173
4174 /* shall be cleared to zero, indicating xfer of data */
4175 if (ireason & (1 << 0))
4176 goto err_out;
4177
4178 /* make sure transfer direction matches expected */
4179 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4180 if (do_write != i_write)
4181 goto err_out;
4182
312f7da2
AL
4183 VPRINTK("ata%u: xfering %d bytes\n", ap->id, bytes);
4184
1da177e4
LT
4185 __atapi_pio_bytes(qc, bytes);
4186
4187 return;
4188
4189err_out:
f15a1daf 4190 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4191 qc->err_mask |= AC_ERR_HSM;
14be71f4 4192 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4193}
4194
4195/**
c234fb00
AL
4196 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4197 * @ap: the target ata_port
4198 * @qc: qc on going
1da177e4 4199 *
c234fb00
AL
4200 * RETURNS:
4201 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4202 */
c234fb00
AL
4203
4204static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4205{
c234fb00
AL
4206 if (qc->tf.flags & ATA_TFLAG_POLLING)
4207 return 1;
1da177e4 4208
c234fb00
AL
4209 if (ap->hsm_task_state == HSM_ST_FIRST) {
4210 if (qc->tf.protocol == ATA_PROT_PIO &&
4211 (qc->tf.flags & ATA_TFLAG_WRITE))
4212 return 1;
1da177e4 4213
c234fb00
AL
4214 if (is_atapi_taskfile(&qc->tf) &&
4215 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4216 return 1;
fe79e683
AL
4217 }
4218
c234fb00
AL
4219 return 0;
4220}
1da177e4 4221
c17ea20d
TH
4222/**
4223 * ata_hsm_qc_complete - finish a qc running on standard HSM
4224 * @qc: Command to complete
4225 * @in_wq: 1 if called from workqueue, 0 otherwise
4226 *
4227 * Finish @qc which is running on standard HSM.
4228 *
4229 * LOCKING:
cca3974e 4230 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4231 * Otherwise, none on entry and grabs host lock.
4232 */
4233static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4234{
4235 struct ata_port *ap = qc->ap;
4236 unsigned long flags;
4237
4238 if (ap->ops->error_handler) {
4239 if (in_wq) {
ba6a1308 4240 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4241
cca3974e
JG
4242 /* EH might have kicked in while host lock is
4243 * released.
c17ea20d
TH
4244 */
4245 qc = ata_qc_from_tag(ap, qc->tag);
4246 if (qc) {
4247 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4248 ap->ops->irq_on(ap);
c17ea20d
TH
4249 ata_qc_complete(qc);
4250 } else
4251 ata_port_freeze(ap);
4252 }
4253
ba6a1308 4254 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4255 } else {
4256 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4257 ata_qc_complete(qc);
4258 else
4259 ata_port_freeze(ap);
4260 }
4261 } else {
4262 if (in_wq) {
ba6a1308 4263 spin_lock_irqsave(ap->lock, flags);
83625006 4264 ap->ops->irq_on(ap);
c17ea20d 4265 ata_qc_complete(qc);
ba6a1308 4266 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4267 } else
4268 ata_qc_complete(qc);
4269 }
1da177e4 4270
c81e29b4 4271 ata_altstatus(ap); /* flush */
c17ea20d
TH
4272}
4273
bb5cb290
AL
4274/**
4275 * ata_hsm_move - move the HSM to the next state.
4276 * @ap: the target ata_port
4277 * @qc: qc on going
4278 * @status: current device status
4279 * @in_wq: 1 if called from workqueue, 0 otherwise
4280 *
4281 * RETURNS:
4282 * 1 when poll next status needed, 0 otherwise.
4283 */
9a1004d0
TH
4284int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4285 u8 status, int in_wq)
e2cec771 4286{
bb5cb290
AL
4287 unsigned long flags = 0;
4288 int poll_next;
4289
6912ccd5
AL
4290 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4291
bb5cb290
AL
4292 /* Make sure ata_qc_issue_prot() does not throw things
4293 * like DMA polling into the workqueue. Notice that
4294 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4295 */
c234fb00 4296 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4297
e2cec771 4298fsm_start:
999bb6f4
AL
4299 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
4300 ap->id, qc->tf.protocol, ap->hsm_task_state, status);
4301
e2cec771
AL
4302 switch (ap->hsm_task_state) {
4303 case HSM_ST_FIRST:
bb5cb290
AL
4304 /* Send first data block or PACKET CDB */
4305
4306 /* If polling, we will stay in the work queue after
4307 * sending the data. Otherwise, interrupt handler
4308 * takes over after sending the data.
4309 */
4310 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4311
e2cec771 4312 /* check device status */
3655d1d3
AL
4313 if (unlikely((status & ATA_DRQ) == 0)) {
4314 /* handle BSY=0, DRQ=0 as error */
4315 if (likely(status & (ATA_ERR | ATA_DF)))
4316 /* device stops HSM for abort/error */
4317 qc->err_mask |= AC_ERR_DEV;
4318 else
4319 /* HSM violation. Let EH handle this */
4320 qc->err_mask |= AC_ERR_HSM;
4321
14be71f4 4322 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4323 goto fsm_start;
1da177e4
LT
4324 }
4325
71601958
AL
4326 /* Device should not ask for data transfer (DRQ=1)
4327 * when it finds something wrong.
eee6c32f
AL
4328 * We ignore DRQ here and stop the HSM by
4329 * changing hsm_task_state to HSM_ST_ERR and
4330 * let the EH abort the command or reset the device.
71601958
AL
4331 */
4332 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4333 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4334 ap->id, status);
3655d1d3 4335 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4336 ap->hsm_task_state = HSM_ST_ERR;
4337 goto fsm_start;
71601958 4338 }
1da177e4 4339
bb5cb290
AL
4340 /* Send the CDB (atapi) or the first data block (ata pio out).
4341 * During the state transition, interrupt handler shouldn't
4342 * be invoked before the data transfer is complete and
4343 * hsm_task_state is changed. Hence, the following locking.
4344 */
4345 if (in_wq)
ba6a1308 4346 spin_lock_irqsave(ap->lock, flags);
1da177e4 4347
bb5cb290
AL
4348 if (qc->tf.protocol == ATA_PROT_PIO) {
4349 /* PIO data out protocol.
4350 * send first data block.
4351 */
0565c26d 4352
bb5cb290
AL
4353 /* ata_pio_sectors() might change the state
4354 * to HSM_ST_LAST. so, the state is changed here
4355 * before ata_pio_sectors().
4356 */
4357 ap->hsm_task_state = HSM_ST;
4358 ata_pio_sectors(qc);
4359 ata_altstatus(ap); /* flush */
4360 } else
4361 /* send CDB */
4362 atapi_send_cdb(ap, qc);
4363
4364 if (in_wq)
ba6a1308 4365 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4366
4367 /* if polling, ata_pio_task() handles the rest.
4368 * otherwise, interrupt handler takes over from here.
4369 */
e2cec771 4370 break;
1c848984 4371
e2cec771
AL
4372 case HSM_ST:
4373 /* complete command or read/write the data register */
4374 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4375 /* ATAPI PIO protocol */
4376 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4377 /* No more data to transfer or device error.
4378 * Device error will be tagged in HSM_ST_LAST.
4379 */
e2cec771
AL
4380 ap->hsm_task_state = HSM_ST_LAST;
4381 goto fsm_start;
4382 }
1da177e4 4383
71601958
AL
4384 /* Device should not ask for data transfer (DRQ=1)
4385 * when it finds something wrong.
eee6c32f
AL
4386 * We ignore DRQ here and stop the HSM by
4387 * changing hsm_task_state to HSM_ST_ERR and
4388 * let the EH abort the command or reset the device.
71601958
AL
4389 */
4390 if (unlikely(status & (ATA_ERR | ATA_DF))) {
4391 printk(KERN_WARNING "ata%d: DRQ=1 with device error, dev_stat 0x%X\n",
4392 ap->id, status);
3655d1d3 4393 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4394 ap->hsm_task_state = HSM_ST_ERR;
4395 goto fsm_start;
71601958 4396 }
1da177e4 4397
e2cec771 4398 atapi_pio_bytes(qc);
7fb6ec28 4399
e2cec771
AL
4400 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4401 /* bad ireason reported by device */
4402 goto fsm_start;
1da177e4 4403
e2cec771
AL
4404 } else {
4405 /* ATA PIO protocol */
4406 if (unlikely((status & ATA_DRQ) == 0)) {
4407 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4408 if (likely(status & (ATA_ERR | ATA_DF)))
4409 /* device stops HSM for abort/error */
4410 qc->err_mask |= AC_ERR_DEV;
4411 else
55a8e2c8
TH
4412 /* HSM violation. Let EH handle this.
4413 * Phantom devices also trigger this
4414 * condition. Mark hint.
4415 */
4416 qc->err_mask |= AC_ERR_HSM |
4417 AC_ERR_NODEV_HINT;
3655d1d3 4418
e2cec771
AL
4419 ap->hsm_task_state = HSM_ST_ERR;
4420 goto fsm_start;
4421 }
1da177e4 4422
eee6c32f
AL
4423 /* For PIO reads, some devices may ask for
4424 * data transfer (DRQ=1) alone with ERR=1.
4425 * We respect DRQ here and transfer one
4426 * block of junk data before changing the
4427 * hsm_task_state to HSM_ST_ERR.
4428 *
4429 * For PIO writes, ERR=1 DRQ=1 doesn't make
4430 * sense since the data block has been
4431 * transferred to the device.
71601958
AL
4432 */
4433 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4434 /* data might be corrputed */
4435 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4436
4437 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4438 ata_pio_sectors(qc);
4439 ata_altstatus(ap);
4440 status = ata_wait_idle(ap);
4441 }
4442
3655d1d3
AL
4443 if (status & (ATA_BUSY | ATA_DRQ))
4444 qc->err_mask |= AC_ERR_HSM;
4445
eee6c32f
AL
4446 /* ata_pio_sectors() might change the
4447 * state to HSM_ST_LAST. so, the state
4448 * is changed after ata_pio_sectors().
4449 */
4450 ap->hsm_task_state = HSM_ST_ERR;
4451 goto fsm_start;
71601958
AL
4452 }
4453
e2cec771
AL
4454 ata_pio_sectors(qc);
4455
4456 if (ap->hsm_task_state == HSM_ST_LAST &&
4457 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4458 /* all data read */
4459 ata_altstatus(ap);
52a32205 4460 status = ata_wait_idle(ap);
e2cec771
AL
4461 goto fsm_start;
4462 }
4463 }
4464
4465 ata_altstatus(ap); /* flush */
bb5cb290 4466 poll_next = 1;
1da177e4
LT
4467 break;
4468
14be71f4 4469 case HSM_ST_LAST:
6912ccd5
AL
4470 if (unlikely(!ata_ok(status))) {
4471 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4472 ap->hsm_task_state = HSM_ST_ERR;
4473 goto fsm_start;
4474 }
4475
4476 /* no more data to transfer */
4332a771
AL
4477 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
4478 ap->id, qc->dev->devno, status);
e2cec771 4479
6912ccd5
AL
4480 WARN_ON(qc->err_mask);
4481
e2cec771 4482 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4483
e2cec771 4484 /* complete taskfile transaction */
c17ea20d 4485 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4486
4487 poll_next = 0;
1da177e4
LT
4488 break;
4489
14be71f4 4490 case HSM_ST_ERR:
e2cec771
AL
4491 /* make sure qc->err_mask is available to
4492 * know what's wrong and recover
4493 */
4494 WARN_ON(qc->err_mask == 0);
4495
4496 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4497
999bb6f4 4498 /* complete taskfile transaction */
c17ea20d 4499 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4500
4501 poll_next = 0;
e2cec771
AL
4502 break;
4503 default:
bb5cb290 4504 poll_next = 0;
6912ccd5 4505 BUG();
1da177e4
LT
4506 }
4507
bb5cb290 4508 return poll_next;
1da177e4
LT
4509}
4510
65f27f38 4511static void ata_pio_task(struct work_struct *work)
8061f5f0 4512{
65f27f38
DH
4513 struct ata_port *ap =
4514 container_of(work, struct ata_port, port_task.work);
4515 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 4516 u8 status;
a1af3734 4517 int poll_next;
8061f5f0 4518
7fb6ec28 4519fsm_start:
a1af3734 4520 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 4521
a1af3734
AL
4522 /*
4523 * This is purely heuristic. This is a fast path.
4524 * Sometimes when we enter, BSY will be cleared in
4525 * a chk-status or two. If not, the drive is probably seeking
4526 * or something. Snooze for a couple msecs, then
4527 * chk-status again. If still busy, queue delayed work.
4528 */
4529 status = ata_busy_wait(ap, ATA_BUSY, 5);
4530 if (status & ATA_BUSY) {
4531 msleep(2);
4532 status = ata_busy_wait(ap, ATA_BUSY, 10);
4533 if (status & ATA_BUSY) {
31ce6dae 4534 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
4535 return;
4536 }
8061f5f0
TH
4537 }
4538
a1af3734
AL
4539 /* move the HSM */
4540 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 4541
a1af3734
AL
4542 /* another command or interrupt handler
4543 * may be running at this point.
4544 */
4545 if (poll_next)
7fb6ec28 4546 goto fsm_start;
8061f5f0
TH
4547}
4548
1da177e4
LT
4549/**
4550 * ata_qc_new - Request an available ATA command, for queueing
4551 * @ap: Port associated with device @dev
4552 * @dev: Device from whom we request an available command structure
4553 *
4554 * LOCKING:
0cba632b 4555 * None.
1da177e4
LT
4556 */
4557
4558static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
4559{
4560 struct ata_queued_cmd *qc = NULL;
4561 unsigned int i;
4562
e3180499 4563 /* no command while frozen */
b51e9e5d 4564 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
4565 return NULL;
4566
2ab7db1f
TH
4567 /* the last tag is reserved for internal command. */
4568 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 4569 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 4570 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
4571 break;
4572 }
4573
4574 if (qc)
4575 qc->tag = i;
4576
4577 return qc;
4578}
4579
4580/**
4581 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4582 * @dev: Device from whom we request an available command structure
4583 *
4584 * LOCKING:
0cba632b 4585 * None.
1da177e4
LT
4586 */
4587
3373efd8 4588struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4589{
3373efd8 4590 struct ata_port *ap = dev->ap;
1da177e4
LT
4591 struct ata_queued_cmd *qc;
4592
4593 qc = ata_qc_new(ap);
4594 if (qc) {
1da177e4
LT
4595 qc->scsicmd = NULL;
4596 qc->ap = ap;
4597 qc->dev = dev;
1da177e4 4598
2c13b7ce 4599 ata_qc_reinit(qc);
1da177e4
LT
4600 }
4601
4602 return qc;
4603}
4604
1da177e4
LT
4605/**
4606 * ata_qc_free - free unused ata_queued_cmd
4607 * @qc: Command to complete
4608 *
4609 * Designed to free unused ata_queued_cmd object
4610 * in case something prevents using it.
4611 *
4612 * LOCKING:
cca3974e 4613 * spin_lock_irqsave(host lock)
1da177e4
LT
4614 */
4615void ata_qc_free(struct ata_queued_cmd *qc)
4616{
4ba946e9
TH
4617 struct ata_port *ap = qc->ap;
4618 unsigned int tag;
4619
a4631474 4620 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4621
4ba946e9
TH
4622 qc->flags = 0;
4623 tag = qc->tag;
4624 if (likely(ata_tag_valid(tag))) {
4ba946e9 4625 qc->tag = ATA_TAG_POISON;
6cec4a39 4626 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4627 }
1da177e4
LT
4628}
4629
76014427 4630void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4631{
dedaf2b0
TH
4632 struct ata_port *ap = qc->ap;
4633
a4631474
TH
4634 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4635 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4636
4637 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4638 ata_sg_clean(qc);
4639
7401abf2 4640 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4641 if (qc->tf.protocol == ATA_PROT_NCQ)
4642 ap->sactive &= ~(1 << qc->tag);
4643 else
4644 ap->active_tag = ATA_TAG_POISON;
7401abf2 4645
3f3791d3
AL
4646 /* atapi: mark qc as inactive to prevent the interrupt handler
4647 * from completing the command twice later, before the error handler
4648 * is called. (when rc != 0 and atapi request sense is needed)
4649 */
4650 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4651 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4652
1da177e4 4653 /* call completion callback */
77853bf2 4654 qc->complete_fn(qc);
1da177e4
LT
4655}
4656
39599a53
TH
4657static void fill_result_tf(struct ata_queued_cmd *qc)
4658{
4659 struct ata_port *ap = qc->ap;
4660
4661 ap->ops->tf_read(ap, &qc->result_tf);
4662 qc->result_tf.flags = qc->tf.flags;
4663}
4664
f686bcb8
TH
4665/**
4666 * ata_qc_complete - Complete an active ATA command
4667 * @qc: Command to complete
4668 * @err_mask: ATA Status register contents
4669 *
4670 * Indicate to the mid and upper layers that an ATA
4671 * command has completed, with either an ok or not-ok status.
4672 *
4673 * LOCKING:
cca3974e 4674 * spin_lock_irqsave(host lock)
f686bcb8
TH
4675 */
4676void ata_qc_complete(struct ata_queued_cmd *qc)
4677{
4678 struct ata_port *ap = qc->ap;
4679
4680 /* XXX: New EH and old EH use different mechanisms to
4681 * synchronize EH with regular execution path.
4682 *
4683 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
4684 * Normal execution path is responsible for not accessing a
4685 * failed qc. libata core enforces the rule by returning NULL
4686 * from ata_qc_from_tag() for failed qcs.
4687 *
4688 * Old EH depends on ata_qc_complete() nullifying completion
4689 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
4690 * not synchronize with interrupt handler. Only PIO task is
4691 * taken care of.
4692 */
4693 if (ap->ops->error_handler) {
b51e9e5d 4694 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
4695
4696 if (unlikely(qc->err_mask))
4697 qc->flags |= ATA_QCFLAG_FAILED;
4698
4699 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
4700 if (!ata_tag_internal(qc->tag)) {
4701 /* always fill result TF for failed qc */
39599a53 4702 fill_result_tf(qc);
f686bcb8
TH
4703 ata_qc_schedule_eh(qc);
4704 return;
4705 }
4706 }
4707
4708 /* read result TF if requested */
4709 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4710 fill_result_tf(qc);
f686bcb8
TH
4711
4712 __ata_qc_complete(qc);
4713 } else {
4714 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
4715 return;
4716
4717 /* read result TF if failed or requested */
4718 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 4719 fill_result_tf(qc);
f686bcb8
TH
4720
4721 __ata_qc_complete(qc);
4722 }
4723}
4724
dedaf2b0
TH
4725/**
4726 * ata_qc_complete_multiple - Complete multiple qcs successfully
4727 * @ap: port in question
4728 * @qc_active: new qc_active mask
4729 * @finish_qc: LLDD callback invoked before completing a qc
4730 *
4731 * Complete in-flight commands. This functions is meant to be
4732 * called from low-level driver's interrupt routine to complete
4733 * requests normally. ap->qc_active and @qc_active is compared
4734 * and commands are completed accordingly.
4735 *
4736 * LOCKING:
cca3974e 4737 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4738 *
4739 * RETURNS:
4740 * Number of completed commands on success, -errno otherwise.
4741 */
4742int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
4743 void (*finish_qc)(struct ata_queued_cmd *))
4744{
4745 int nr_done = 0;
4746 u32 done_mask;
4747 int i;
4748
4749 done_mask = ap->qc_active ^ qc_active;
4750
4751 if (unlikely(done_mask & qc_active)) {
4752 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4753 "(%08x->%08x)\n", ap->qc_active, qc_active);
4754 return -EINVAL;
4755 }
4756
4757 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4758 struct ata_queued_cmd *qc;
4759
4760 if (!(done_mask & (1 << i)))
4761 continue;
4762
4763 if ((qc = ata_qc_from_tag(ap, i))) {
4764 if (finish_qc)
4765 finish_qc(qc);
4766 ata_qc_complete(qc);
4767 nr_done++;
4768 }
4769 }
4770
4771 return nr_done;
4772}
4773
1da177e4
LT
4774static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4775{
4776 struct ata_port *ap = qc->ap;
4777
4778 switch (qc->tf.protocol) {
3dc1d881 4779 case ATA_PROT_NCQ:
1da177e4
LT
4780 case ATA_PROT_DMA:
4781 case ATA_PROT_ATAPI_DMA:
4782 return 1;
4783
4784 case ATA_PROT_ATAPI:
4785 case ATA_PROT_PIO:
1da177e4
LT
4786 if (ap->flags & ATA_FLAG_PIO_DMA)
4787 return 1;
4788
4789 /* fall through */
4790
4791 default:
4792 return 0;
4793 }
4794
4795 /* never reached */
4796}
4797
4798/**
4799 * ata_qc_issue - issue taskfile to device
4800 * @qc: command to issue to device
4801 *
4802 * Prepare an ATA command to submission to device.
4803 * This includes mapping the data into a DMA-able
4804 * area, filling in the S/G table, and finally
4805 * writing the taskfile to hardware, starting the command.
4806 *
4807 * LOCKING:
cca3974e 4808 * spin_lock_irqsave(host lock)
1da177e4 4809 */
8e0e694a 4810void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
4811{
4812 struct ata_port *ap = qc->ap;
4813
dedaf2b0
TH
4814 /* Make sure only one non-NCQ command is outstanding. The
4815 * check is skipped for old EH because it reuses active qc to
4816 * request ATAPI sense.
4817 */
4818 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
4819
4820 if (qc->tf.protocol == ATA_PROT_NCQ) {
4821 WARN_ON(ap->sactive & (1 << qc->tag));
4822 ap->sactive |= 1 << qc->tag;
4823 } else {
4824 WARN_ON(ap->sactive);
4825 ap->active_tag = qc->tag;
4826 }
4827
e4a70e76 4828 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 4829 ap->qc_active |= 1 << qc->tag;
e4a70e76 4830
1da177e4
LT
4831 if (ata_should_dma_map(qc)) {
4832 if (qc->flags & ATA_QCFLAG_SG) {
4833 if (ata_sg_setup(qc))
8e436af9 4834 goto sg_err;
1da177e4
LT
4835 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
4836 if (ata_sg_setup_one(qc))
8e436af9 4837 goto sg_err;
1da177e4
LT
4838 }
4839 } else {
4840 qc->flags &= ~ATA_QCFLAG_DMAMAP;
4841 }
4842
4843 ap->ops->qc_prep(qc);
4844
8e0e694a
TH
4845 qc->err_mask |= ap->ops->qc_issue(qc);
4846 if (unlikely(qc->err_mask))
4847 goto err;
4848 return;
1da177e4 4849
8e436af9
TH
4850sg_err:
4851 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
4852 qc->err_mask |= AC_ERR_SYSTEM;
4853err:
4854 ata_qc_complete(qc);
1da177e4
LT
4855}
4856
4857/**
4858 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
4859 * @qc: command to issue to device
4860 *
4861 * Using various libata functions and hooks, this function
4862 * starts an ATA command. ATA commands are grouped into
4863 * classes called "protocols", and issuing each type of protocol
4864 * is slightly different.
4865 *
0baab86b
EF
4866 * May be used as the qc_issue() entry in ata_port_operations.
4867 *
1da177e4 4868 * LOCKING:
cca3974e 4869 * spin_lock_irqsave(host lock)
1da177e4
LT
4870 *
4871 * RETURNS:
9a3d9eb0 4872 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
4873 */
4874
9a3d9eb0 4875unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
4876{
4877 struct ata_port *ap = qc->ap;
4878
e50362ec
AL
4879 /* Use polling pio if the LLD doesn't handle
4880 * interrupt driven pio and atapi CDB interrupt.
4881 */
4882 if (ap->flags & ATA_FLAG_PIO_POLLING) {
4883 switch (qc->tf.protocol) {
4884 case ATA_PROT_PIO:
e3472cbe 4885 case ATA_PROT_NODATA:
e50362ec
AL
4886 case ATA_PROT_ATAPI:
4887 case ATA_PROT_ATAPI_NODATA:
4888 qc->tf.flags |= ATA_TFLAG_POLLING;
4889 break;
4890 case ATA_PROT_ATAPI_DMA:
4891 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 4892 /* see ata_dma_blacklisted() */
e50362ec
AL
4893 BUG();
4894 break;
4895 default:
4896 break;
4897 }
4898 }
4899
3d3cca37
TH
4900 /* Some controllers show flaky interrupt behavior after
4901 * setting xfer mode. Use polling instead.
4902 */
4903 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
4904 qc->tf.feature == SETFEATURES_XFER) &&
4905 (ap->flags & ATA_FLAG_SETXFER_POLLING))
4906 qc->tf.flags |= ATA_TFLAG_POLLING;
4907
312f7da2 4908 /* select the device */
1da177e4
LT
4909 ata_dev_select(ap, qc->dev->devno, 1, 0);
4910
312f7da2 4911 /* start the command */
1da177e4
LT
4912 switch (qc->tf.protocol) {
4913 case ATA_PROT_NODATA:
312f7da2
AL
4914 if (qc->tf.flags & ATA_TFLAG_POLLING)
4915 ata_qc_set_polling(qc);
4916
e5338254 4917 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
4918 ap->hsm_task_state = HSM_ST_LAST;
4919
4920 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4921 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 4922
1da177e4
LT
4923 break;
4924
4925 case ATA_PROT_DMA:
587005de 4926 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4927
1da177e4
LT
4928 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4929 ap->ops->bmdma_setup(qc); /* set up bmdma */
4930 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 4931 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4932 break;
4933
312f7da2
AL
4934 case ATA_PROT_PIO:
4935 if (qc->tf.flags & ATA_TFLAG_POLLING)
4936 ata_qc_set_polling(qc);
1da177e4 4937
e5338254 4938 ata_tf_to_host(ap, &qc->tf);
312f7da2 4939
54f00389
AL
4940 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4941 /* PIO data out protocol */
4942 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 4943 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4944
4945 /* always send first data block using
e27486db 4946 * the ata_pio_task() codepath.
54f00389 4947 */
312f7da2 4948 } else {
54f00389
AL
4949 /* PIO data in protocol */
4950 ap->hsm_task_state = HSM_ST;
4951
4952 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 4953 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
4954
4955 /* if polling, ata_pio_task() handles the rest.
4956 * otherwise, interrupt handler takes over from here.
4957 */
312f7da2
AL
4958 }
4959
1da177e4
LT
4960 break;
4961
1da177e4 4962 case ATA_PROT_ATAPI:
1da177e4 4963 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
4964 if (qc->tf.flags & ATA_TFLAG_POLLING)
4965 ata_qc_set_polling(qc);
4966
e5338254 4967 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 4968
312f7da2
AL
4969 ap->hsm_task_state = HSM_ST_FIRST;
4970
4971 /* send cdb by polling if no cdb interrupt */
4972 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
4973 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 4974 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4975 break;
4976
4977 case ATA_PROT_ATAPI_DMA:
587005de 4978 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 4979
1da177e4
LT
4980 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
4981 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
4982 ap->hsm_task_state = HSM_ST_FIRST;
4983
4984 /* send cdb by polling if no cdb interrupt */
4985 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 4986 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
4987 break;
4988
4989 default:
4990 WARN_ON(1);
9a3d9eb0 4991 return AC_ERR_SYSTEM;
1da177e4
LT
4992 }
4993
4994 return 0;
4995}
4996
1da177e4
LT
4997/**
4998 * ata_host_intr - Handle host interrupt for given (port, task)
4999 * @ap: Port on which interrupt arrived (possibly...)
5000 * @qc: Taskfile currently active in engine
5001 *
5002 * Handle host interrupt for given queued command. Currently,
5003 * only DMA interrupts are handled. All other commands are
5004 * handled via polling with interrupts disabled (nIEN bit).
5005 *
5006 * LOCKING:
cca3974e 5007 * spin_lock_irqsave(host lock)
1da177e4
LT
5008 *
5009 * RETURNS:
5010 * One if interrupt was handled, zero if not (shared irq).
5011 */
5012
5013inline unsigned int ata_host_intr (struct ata_port *ap,
5014 struct ata_queued_cmd *qc)
5015{
ea54763f 5016 struct ata_eh_info *ehi = &ap->eh_info;
312f7da2 5017 u8 status, host_stat = 0;
1da177e4 5018
312f7da2
AL
5019 VPRINTK("ata%u: protocol %d task_state %d\n",
5020 ap->id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 5021
312f7da2
AL
5022 /* Check whether we are expecting interrupt in this state */
5023 switch (ap->hsm_task_state) {
5024 case HSM_ST_FIRST:
6912ccd5
AL
5025 /* Some pre-ATAPI-4 devices assert INTRQ
5026 * at this state when ready to receive CDB.
5027 */
1da177e4 5028
312f7da2
AL
5029 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
5030 * The flag was turned on only for atapi devices.
5031 * No need to check is_atapi_taskfile(&qc->tf) again.
5032 */
5033 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 5034 goto idle_irq;
1da177e4 5035 break;
312f7da2
AL
5036 case HSM_ST_LAST:
5037 if (qc->tf.protocol == ATA_PROT_DMA ||
5038 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5039 /* check status of DMA engine */
5040 host_stat = ap->ops->bmdma_status(ap);
5041 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
5042
5043 /* if it's not our irq... */
5044 if (!(host_stat & ATA_DMA_INTR))
5045 goto idle_irq;
5046
5047 /* before we do anything else, clear DMA-Start bit */
5048 ap->ops->bmdma_stop(qc);
a4f16610
AL
5049
5050 if (unlikely(host_stat & ATA_DMA_ERR)) {
5051 /* error when transfering data to/from memory */
5052 qc->err_mask |= AC_ERR_HOST_BUS;
5053 ap->hsm_task_state = HSM_ST_ERR;
5054 }
312f7da2
AL
5055 }
5056 break;
5057 case HSM_ST:
5058 break;
1da177e4
LT
5059 default:
5060 goto idle_irq;
5061 }
5062
312f7da2
AL
5063 /* check altstatus */
5064 status = ata_altstatus(ap);
5065 if (status & ATA_BUSY)
5066 goto idle_irq;
1da177e4 5067
312f7da2
AL
5068 /* check main status, clearing INTRQ */
5069 status = ata_chk_status(ap);
5070 if (unlikely(status & ATA_BUSY))
5071 goto idle_irq;
1da177e4 5072
312f7da2
AL
5073 /* ack bmdma irq events */
5074 ap->ops->irq_clear(ap);
1da177e4 5075
bb5cb290 5076 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
5077
5078 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5079 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5080 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5081
1da177e4
LT
5082 return 1; /* irq handled */
5083
5084idle_irq:
5085 ap->stats.idle_irq++;
5086
5087#ifdef ATA_IRQ_TRAP
5088 if ((ap->stats.idle_irq % 1000) == 0) {
83625006 5089 ap->ops->irq_ack(ap, 0); /* debug trap */
f15a1daf 5090 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 5091 return 1;
1da177e4
LT
5092 }
5093#endif
5094 return 0; /* irq not handled */
5095}
5096
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;	/* OR of per-port ata_host_intr() results */
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* Dispatch only when an active, interrupt-driven
			 * (non-polled) command is in flight on this port;
			 * otherwise the IRQ cannot be ours.
			 */
			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
5141
34bf2170
TH
5142/**
5143 * sata_scr_valid - test whether SCRs are accessible
5144 * @ap: ATA port to test SCR accessibility for
5145 *
5146 * Test whether SCRs are accessible for @ap.
5147 *
5148 * LOCKING:
5149 * None.
5150 *
5151 * RETURNS:
5152 * 1 if SCRs are accessible, 0 otherwise.
5153 */
5154int sata_scr_valid(struct ata_port *ap)
5155{
5156 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5157}
5158
5159/**
5160 * sata_scr_read - read SCR register of the specified port
5161 * @ap: ATA port to read SCR for
5162 * @reg: SCR to read
5163 * @val: Place to store read value
5164 *
5165 * Read SCR register @reg of @ap into *@val. This function is
5166 * guaranteed to succeed if the cable type of the port is SATA
5167 * and the port implements ->scr_read.
5168 *
5169 * LOCKING:
5170 * None.
5171 *
5172 * RETURNS:
5173 * 0 on success, negative errno on failure.
5174 */
5175int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5176{
5177 if (sata_scr_valid(ap)) {
5178 *val = ap->ops->scr_read(ap, reg);
5179 return 0;
5180 }
5181 return -EOPNOTSUPP;
5182}
5183
5184/**
5185 * sata_scr_write - write SCR register of the specified port
5186 * @ap: ATA port to write SCR for
5187 * @reg: SCR to write
5188 * @val: value to write
5189 *
5190 * Write @val to SCR register @reg of @ap. This function is
5191 * guaranteed to succeed if the cable type of the port is SATA
5192 * and the port implements ->scr_read.
5193 *
5194 * LOCKING:
5195 * None.
5196 *
5197 * RETURNS:
5198 * 0 on success, negative errno on failure.
5199 */
5200int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5201{
5202 if (sata_scr_valid(ap)) {
5203 ap->ops->scr_write(ap, reg, val);
5204 return 0;
5205 }
5206 return -EOPNOTSUPP;
5207}
5208
5209/**
5210 * sata_scr_write_flush - write SCR register of the specified port and flush
5211 * @ap: ATA port to write SCR for
5212 * @reg: SCR to write
5213 * @val: value to write
5214 *
5215 * This function is identical to sata_scr_write() except that this
5216 * function performs flush after writing to the register.
5217 *
5218 * LOCKING:
5219 * None.
5220 *
5221 * RETURNS:
5222 * 0 on success, negative errno on failure.
5223 */
5224int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5225{
5226 if (sata_scr_valid(ap)) {
5227 ap->ops->scr_write(ap, reg, val);
5228 ap->ops->scr_read(ap, reg);
5229 return 0;
5230 }
5231 return -EOPNOTSUPP;
5232}
5233
5234/**
5235 * ata_port_online - test whether the given port is online
5236 * @ap: ATA port to test
5237 *
5238 * Test whether @ap is online. Note that this function returns 0
5239 * if online status of @ap cannot be obtained, so
5240 * ata_port_online(ap) != !ata_port_offline(ap).
5241 *
5242 * LOCKING:
5243 * None.
5244 *
5245 * RETURNS:
5246 * 1 if the port online status is available and online.
5247 */
5248int ata_port_online(struct ata_port *ap)
5249{
5250 u32 sstatus;
5251
5252 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5253 return 1;
5254 return 0;
5255}
5256
5257/**
5258 * ata_port_offline - test whether the given port is offline
5259 * @ap: ATA port to test
5260 *
5261 * Test whether @ap is offline. Note that this function returns
5262 * 0 if offline status of @ap cannot be obtained, so
5263 * ata_port_online(ap) != !ata_port_offline(ap).
5264 *
5265 * LOCKING:
5266 * None.
5267 *
5268 * RETURNS:
5269 * 1 if the port offline status is available and offline.
5270 */
5271int ata_port_offline(struct ata_port *ap)
5272{
5273 u32 sstatus;
5274
5275 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5276 return 1;
5277 return 0;
5278}
0baab86b 5279
77b08fb5 5280int ata_flush_cache(struct ata_device *dev)
9b847548 5281{
977e6b9f 5282 unsigned int err_mask;
9b847548
JA
5283 u8 cmd;
5284
5285 if (!ata_try_flush_cache(dev))
5286 return 0;
5287
6fc49adb 5288 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
5289 cmd = ATA_CMD_FLUSH_EXT;
5290 else
5291 cmd = ATA_CMD_FLUSH;
5292
977e6b9f
TH
5293 err_mask = ata_do_simple_cmd(dev, cmd);
5294 if (err_mask) {
5295 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5296 return -EIO;
5297 }
5298
5299 return 0;
9b847548
JA
5300}
5301
/**
 *	ata_host_request_pm - schedule a PM operation on all ports via EH
 *	@host: host the PM request applies to
 *	@mesg: PM message to store in each port's pm_mesg
 *	@action: EH action mask to schedule
 *	@ehi_flags: EH info flags to set
 *	@wait: if non-zero, wait for each port's EH to finish and
 *	       propagate its result
 *
 *	Helper shared by ata_host_suspend() and ata_host_resume().
 *	Marks every port PM-pending, records the request in the port's
 *	EH info under the port lock and kicks EH.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success; when @wait, the first non-zero per-port result.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH writes its result through ap->pm_result;
			 * rc lives on our stack, so we must wait below
			 * before it goes out of scope. */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
5348
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now.  Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

	/* undo the partial suspend so the host is left usable */
 fail:
	ata_host_resume(host);
	return rc;
}
5399
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed in parallel
 *	(wait == 0, so no per-port result is collected).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
5417
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table (managed via
 *	devres, so no explicit free is needed on teardown).
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM (or ata_pad_alloc()'s error) on failure.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
5448
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Zero only the tail of the structure; fields located before
	 * ATA_DEVICE_CLEAR_OFFSET are preserved across re-init. */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* allow every transfer mode until probing narrows the masks */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
5480
1da177e4 5481/**
155a8a9c 5482 * ata_port_init - Initialize an ata_port structure
1da177e4 5483 * @ap: Structure to initialize
cca3974e 5484 * @host: Collection of hosts to which @ap belongs
1da177e4
LT
5485 * @ent: Probe information provided by low-level driver
5486 * @port_no: Port number associated with this ata_port
5487 *
155a8a9c 5488 * Initialize a new ata_port structure.
0cba632b 5489 *
1da177e4 5490 * LOCKING:
0cba632b 5491 * Inherited from caller.
1da177e4 5492 */
cca3974e 5493void ata_port_init(struct ata_port *ap, struct ata_host *host,
155a8a9c 5494 const struct ata_probe_ent *ent, unsigned int port_no)
1da177e4
LT
5495{
5496 unsigned int i;
5497
cca3974e 5498 ap->lock = &host->lock;
198e0fed 5499 ap->flags = ATA_FLAG_DISABLED;
155a8a9c 5500 ap->id = ata_unique_id++;
1da177e4 5501 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5502 ap->host = host;
2f1f610b 5503 ap->dev = ent->dev;
1da177e4 5504 ap->port_no = port_no;
fea63e38
TH
5505 if (port_no == 1 && ent->pinfo2) {
5506 ap->pio_mask = ent->pinfo2->pio_mask;
5507 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5508 ap->udma_mask = ent->pinfo2->udma_mask;
5509 ap->flags |= ent->pinfo2->flags;
5510 ap->ops = ent->pinfo2->port_ops;
5511 } else {
5512 ap->pio_mask = ent->pio_mask;
5513 ap->mwdma_mask = ent->mwdma_mask;
5514 ap->udma_mask = ent->udma_mask;
5515 ap->flags |= ent->port_flags;
5516 ap->ops = ent->port_ops;
5517 }
5a04bf4b 5518 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
5519 ap->active_tag = ATA_TAG_POISON;
5520 ap->last_ctl = 0xFF;
bd5d825c
BP
5521
5522#if defined(ATA_VERBOSE_DEBUG)
5523 /* turn on all debugging levels */
5524 ap->msg_enable = 0x00FF;
5525#elif defined(ATA_DEBUG)
5526 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5527#else
0dd4b21f 5528 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5529#endif
1da177e4 5530
65f27f38
DH
5531 INIT_DELAYED_WORK(&ap->port_task, NULL);
5532 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5533 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5534 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5535 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 5536
838df628
TH
5537 /* set cable type */
5538 ap->cbl = ATA_CBL_NONE;
5539 if (ap->flags & ATA_FLAG_SATA)
5540 ap->cbl = ATA_CBL_SATA;
5541
acf356b1
TH
5542 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5543 struct ata_device *dev = &ap->device[i];
38d87234 5544 dev->ap = ap;
72fa4b74 5545 dev->devno = i;
3ef3b43d 5546 ata_dev_init(dev);
acf356b1 5547 }
1da177e4
LT
5548
5549#ifdef ATA_IRQ_TRAP
5550 ap->stats.unhandled_irq = 1;
5551 ap->stats.idle_irq = 1;
5552#endif
5553
5554 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5555}
5556
155a8a9c 5557/**
4608c160
TH
5558 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5559 * @ap: ATA port to initialize SCSI host for
5560 * @shost: SCSI host associated with @ap
155a8a9c 5561 *
4608c160 5562 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5563 *
5564 * LOCKING:
5565 * Inherited from caller.
5566 */
4608c160 5567static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5568{
cca3974e 5569 ap->scsi_host = shost;
155a8a9c 5570
4608c160
TH
5571 shost->unique_id = ap->id;
5572 shost->max_id = 16;
5573 shost->max_lun = 1;
5574 shost->max_channel = 1;
5575 shost->max_cmd_len = 12;
155a8a9c
BK
5576}
5577
1da177e4 5578/**
996139f1 5579 * ata_port_add - Attach low-level ATA driver to system
1da177e4 5580 * @ent: Information provided by low-level driver
cca3974e 5581 * @host: Collections of ports to which we add
1da177e4
LT
5582 * @port_no: Port number associated with this host
5583 *
0cba632b
JG
5584 * Attach low-level ATA driver to system.
5585 *
1da177e4 5586 * LOCKING:
0cba632b 5587 * PCI/etc. bus probe sem.
1da177e4
LT
5588 *
5589 * RETURNS:
0cba632b 5590 * New ata_port on success, for NULL on error.
1da177e4 5591 */
996139f1 5592static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
cca3974e 5593 struct ata_host *host,
1da177e4
LT
5594 unsigned int port_no)
5595{
996139f1 5596 struct Scsi_Host *shost;
1da177e4 5597 struct ata_port *ap;
1da177e4
LT
5598
5599 DPRINTK("ENTER\n");
aec5c3c1 5600
52783c5d 5601 if (!ent->port_ops->error_handler &&
cca3974e 5602 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
aec5c3c1
TH
5603 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5604 port_no);
5605 return NULL;
5606 }
5607
996139f1
JG
5608 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5609 if (!shost)
1da177e4
LT
5610 return NULL;
5611
996139f1 5612 shost->transportt = &ata_scsi_transport_template;
30afc84c 5613
996139f1 5614 ap = ata_shost_to_port(shost);
1da177e4 5615
cca3974e 5616 ata_port_init(ap, host, ent, port_no);
996139f1 5617 ata_port_init_shost(ap, shost);
1da177e4 5618
1da177e4 5619 return ap;
1da177e4
LT
5620}
5621
/* Devres release callback paired with the devres_alloc() in
 * ata_device_add(): stop each started port, drop its SCSI host
 * reference, then stop the host itself.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* slots may be NULL when ata_port_add() failed mid-way */
		if (!ap)
			continue;

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);

		scsi_host_put(ap->scsi_host);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
5642
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	Basic field/lock setup; also used by SAS glue code, hence
 *	exported rather than static.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */

void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
5663
1da177e4 5664/**
0cba632b
JG
5665 * ata_device_add - Register hardware device with ATA and SCSI layers
5666 * @ent: Probe information describing hardware device to be registered
5667 *
5668 * This function processes the information provided in the probe
5669 * information struct @ent, allocates the necessary ATA and SCSI
5670 * host information structures, initializes them, and registers
5671 * everything with requisite kernel subsystems.
5672 *
5673 * This function requests irqs, probes the ATA bus, and probes
5674 * the SCSI bus.
1da177e4
LT
5675 *
5676 * LOCKING:
0cba632b 5677 * PCI/etc. bus probe sem.
1da177e4
LT
5678 *
5679 * RETURNS:
0cba632b 5680 * Number of ports registered. Zero on error (no ports registered).
1da177e4 5681 */
057ace5e 5682int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 5683{
6d0500df 5684 unsigned int i;
1da177e4 5685 struct device *dev = ent->dev;
cca3974e 5686 struct ata_host *host;
39b07ce6 5687 int rc;
1da177e4
LT
5688
5689 DPRINTK("ENTER\n");
f20b16ff 5690
02f076aa
AC
5691 if (ent->irq == 0) {
5692 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5693 return 0;
5694 }
f0d36efd
TH
5695
5696 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5697 return 0;
5698
1da177e4 5699 /* alloc a container for our list of ATA ports (buses) */
f0d36efd
TH
5700 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5701 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
cca3974e 5702 if (!host)
f0d36efd
TH
5703 goto err_out;
5704 devres_add(dev, host);
5705 dev_set_drvdata(dev, host);
1da177e4 5706
cca3974e
JG
5707 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5708 host->n_ports = ent->n_ports;
5709 host->irq = ent->irq;
5710 host->irq2 = ent->irq2;
0d5ff566 5711 host->iomap = ent->iomap;
cca3974e 5712 host->private_data = ent->private_data;
1da177e4
LT
5713
5714 /* register each port bound to this device */
cca3974e 5715 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5716 struct ata_port *ap;
5717 unsigned long xfer_mode_mask;
2ec7df04 5718 int irq_line = ent->irq;
1da177e4 5719
cca3974e 5720 ap = ata_port_add(ent, host, i);
c38778c3 5721 host->ports[i] = ap;
1da177e4
LT
5722 if (!ap)
5723 goto err_out;
5724
dd5b06c4
TH
5725 /* dummy? */
5726 if (ent->dummy_port_mask & (1 << i)) {
5727 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5728 ap->ops = &ata_dummy_port_ops;
5729 continue;
5730 }
5731
5732 /* start port */
5733 rc = ap->ops->port_start(ap);
5734 if (rc) {
cca3974e
JG
5735 host->ports[i] = NULL;
5736 scsi_host_put(ap->scsi_host);
dd5b06c4
TH
5737 goto err_out;
5738 }
5739
2ec7df04
AC
5740 /* Report the secondary IRQ for second channel legacy */
5741 if (i == 1 && ent->irq2)
5742 irq_line = ent->irq2;
5743
1da177e4
LT
5744 xfer_mode_mask =(ap->udma_mask << ATA_SHIFT_UDMA) |
5745 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5746 (ap->pio_mask << ATA_SHIFT_PIO);
5747
5748 /* print per-port info to dmesg */
0d5ff566
TH
5749 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5750 "ctl 0x%p bmdma 0x%p irq %d\n",
f15a1daf
TH
5751 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5752 ata_mode_string(xfer_mode_mask),
5753 ap->ioaddr.cmd_addr,
5754 ap->ioaddr.ctl_addr,
5755 ap->ioaddr.bmdma_addr,
2ec7df04 5756 irq_line);
1da177e4 5757
0f0a3ad3
TH
5758 /* freeze port before requesting IRQ */
5759 ata_eh_freeze_port(ap);
1da177e4
LT
5760 }
5761
2ec7df04 5762 /* obtain irq, that may be shared between channels */
f0d36efd
TH
5763 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5764 ent->irq_flags, DRV_NAME, host);
39b07ce6
JG
5765 if (rc) {
5766 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5767 ent->irq, rc);
1da177e4 5768 goto err_out;
39b07ce6 5769 }
1da177e4 5770
2ec7df04
AC
5771 /* do we have a second IRQ for the other channel, eg legacy mode */
5772 if (ent->irq2) {
5773 /* We will get weird core code crashes later if this is true
5774 so trap it now */
5775 BUG_ON(ent->irq == ent->irq2);
5776
f0d36efd
TH
5777 rc = devm_request_irq(dev, ent->irq2,
5778 ent->port_ops->irq_handler, ent->irq_flags,
5779 DRV_NAME, host);
2ec7df04
AC
5780 if (rc) {
5781 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5782 ent->irq2, rc);
f0d36efd 5783 goto err_out;
2ec7df04
AC
5784 }
5785 }
5786
f0d36efd 5787 /* resource acquisition complete */
b878ca5d 5788 devres_remove_group(dev, ata_device_add);
f0d36efd 5789
1da177e4
LT
5790 /* perform each probe synchronously */
5791 DPRINTK("probe begin\n");
cca3974e
JG
5792 for (i = 0; i < host->n_ports; i++) {
5793 struct ata_port *ap = host->ports[i];
5a04bf4b 5794 u32 scontrol;
1da177e4
LT
5795 int rc;
5796
5a04bf4b
TH
5797 /* init sata_spd_limit to the current value */
5798 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5799 int spd = (scontrol >> 4) & 0xf;
5800 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5801 }
5802 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5803
cca3974e 5804 rc = scsi_add_host(ap->scsi_host, dev);
1da177e4 5805 if (rc) {
f15a1daf 5806 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
1da177e4
LT
5807 /* FIXME: do something useful here */
5808 /* FIXME: handle unconditional calls to
5809 * scsi_scan_host and ata_host_remove, below,
5810 * at the very least
5811 */
5812 }
3e706399 5813
52783c5d 5814 if (ap->ops->error_handler) {
1cdaf534 5815 struct ata_eh_info *ehi = &ap->eh_info;
3e706399
TH
5816 unsigned long flags;
5817
5818 ata_port_probe(ap);
5819
5820 /* kick EH for boot probing */
ba6a1308 5821 spin_lock_irqsave(ap->lock, flags);
3e706399 5822
1cdaf534
TH
5823 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5824 ehi->action |= ATA_EH_SOFTRESET;
5825 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
3e706399 5826
b51e9e5d 5827 ap->pflags |= ATA_PFLAG_LOADING;
3e706399
TH
5828 ata_port_schedule_eh(ap);
5829
ba6a1308 5830 spin_unlock_irqrestore(ap->lock, flags);
3e706399
TH
5831
5832 /* wait for EH to finish */
5833 ata_port_wait_eh(ap);
5834 } else {
5835 DPRINTK("ata%u: bus probe begin\n", ap->id);
5836 rc = ata_bus_probe(ap);
5837 DPRINTK("ata%u: bus probe end\n", ap->id);
5838
5839 if (rc) {
5840 /* FIXME: do something useful here?
5841 * Current libata behavior will
5842 * tear down everything when
5843 * the module is removed
5844 * or the h/w is unplugged.
5845 */
5846 }
5847 }
1da177e4
LT
5848 }
5849
5850 /* probes are done, now scan each port's disk(s) */
c893a3ae 5851 DPRINTK("host probe begin\n");
cca3974e
JG
5852 for (i = 0; i < host->n_ports; i++) {
5853 struct ata_port *ap = host->ports[i];
1da177e4 5854
644dd0cc 5855 ata_scsi_scan_host(ap);
1da177e4
LT
5856 }
5857
1da177e4
LT
5858 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5859 return ent->n_ports; /* success */
5860
f0d36efd
TH
5861 err_out:
5862 devres_release_group(dev, ata_device_add);
5863 dev_set_drvdata(dev, NULL);
5864 VPRINTK("EXIT, returning %d\n", rc);
1da177e4
LT
5865 return 0;
5866}
5867
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	/* old-style (no EH) ports have nothing to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	flush_workqueue(ata_aux_wq);
	cancel_delayed_work(&ap->hotplug_task);
	flush_workqueue(ata_aux_wq);	/* flush again in case it re-queued */

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
5925
0529c159
TH
5926/**
5927 * ata_host_detach - Detach all ports of an ATA host
5928 * @host: Host to detach
5929 *
5930 * Detach all ports of @host.
5931 *
5932 * LOCKING:
5933 * Kernel thread context (may sleep).
5934 */
5935void ata_host_detach(struct ata_host *host)
5936{
5937 int i;
5938
5939 for (i = 0; i < host->n_ports; i++)
5940 ata_port_detach(host->ports[i]);
5941}
5942
/* Allocate a devres-managed probe entry and populate it from the
 * supplied port info.  Returns NULL (after logging) on allocation
 * failure; the allocation is freed automatically with @dev.
 */
struct ata_probe_ent *
ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
	if (!probe_ent) {
		printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
		       kobject_name(&(dev->kobj)));
		return NULL;
	}

	INIT_LIST_HEAD(&probe_ent->node);
	probe_ent->dev = dev;

	/* copy transfer masks and ops straight from the port info */
	probe_ent->sht = port->sht;
	probe_ent->port_flags = port->flags;
	probe_ent->pio_mask = port->pio_mask;
	probe_ent->mwdma_mask = port->mwdma_mask;
	probe_ent->udma_mask = port->udma_mask;
	probe_ent->port_ops = port->port_ops;
	probe_ent->private_data = port->private_data;

	return probe_ent;
}
5968
1da177e4
LT
5969/**
5970 * ata_std_ports - initialize ioaddr with standard port offsets.
5971 * @ioaddr: IO address structure to be initialized
0baab86b
EF
5972 *
5973 * Utility function which initializes data_addr, error_addr,
5974 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
5975 * device_addr, status_addr, and command_addr to standard offsets
5976 * relative to cmd_addr.
5977 *
5978 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 5979 */
0baab86b 5980
1da177e4
LT
5981void ata_std_ports(struct ata_ioports *ioaddr)
5982{
5983 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
5984 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
5985 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
5986 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
5987 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
5988 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
5989 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
5990 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
5991 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
5992 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
5993}
5994
0baab86b 5995
374b1873
JG
5996#ifdef CONFIG_PCI
5997
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
6016
6017/* move to PCI subsystem */
057ace5e 6018int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6019{
6020 unsigned long tmp = 0;
6021
6022 switch (bits->width) {
6023 case 1: {
6024 u8 tmp8 = 0;
6025 pci_read_config_byte(pdev, bits->reg, &tmp8);
6026 tmp = tmp8;
6027 break;
6028 }
6029 case 2: {
6030 u16 tmp16 = 0;
6031 pci_read_config_word(pdev, bits->reg, &tmp16);
6032 tmp = tmp16;
6033 break;
6034 }
6035 case 4: {
6036 u32 tmp32 = 0;
6037 pci_read_config_dword(pdev, bits->reg, &tmp32);
6038 tmp = tmp32;
6039 break;
6040 }
6041
6042 default:
6043 return -EINVAL;
6044 }
6045
6046 tmp &= bits->mask;
6047
6048 return (tmp == bits->val) ? 1 : 0;
6049}
9b847548 6050
3c5100c1 6051void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6052{
6053 pci_save_state(pdev);
4c90d971 6054 pci_disable_device(pdev);
500530f6 6055
4c90d971 6056 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6057 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6058}
6059
553c4aa6 6060int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6061{
553c4aa6
TH
6062 int rc;
6063
9b847548
JA
6064 pci_set_power_state(pdev, PCI_D0);
6065 pci_restore_state(pdev);
553c4aa6 6066
b878ca5d 6067 rc = pcim_enable_device(pdev);
553c4aa6
TH
6068 if (rc) {
6069 dev_printk(KERN_ERR, &pdev->dev,
6070 "failed to enable device after resume (%d)\n", rc);
6071 return rc;
6072 }
6073
9b847548 6074 pci_set_master(pdev);
553c4aa6 6075 return 0;
500530f6
TH
6076}
6077
3c5100c1 6078int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6079{
cca3974e 6080 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6081 int rc = 0;
6082
cca3974e 6083 rc = ata_host_suspend(host, mesg);
500530f6
TH
6084 if (rc)
6085 return rc;
6086
3c5100c1 6087 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6088
6089 return 0;
6090}
6091
6092int ata_pci_device_resume(struct pci_dev *pdev)
6093{
cca3974e 6094 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6095 int rc;
500530f6 6096
553c4aa6
TH
6097 rc = ata_pci_device_do_resume(pdev);
6098 if (rc == 0)
6099 ata_host_resume(host);
6100 return rc;
9b847548 6101}
1da177e4
LT
6102#endif /* CONFIG_PCI */
6103
6104
1da177e4
LT
/* Module init: scale the probe timeout from seconds to jiffies and
 * create the two libata workqueues (ata_wq for PIO tasks, ata_aux_wq
 * single-threaded for auxiliary work such as hotplug).
 */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);	/* undo first allocation */
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
6121
/* Module exit: tear down both workqueues created in ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6127
a4625085 6128subsys_initcall(ata_init);
1da177e4
LT
6129module_exit(ata_exit);
6130
67846b30 6131static unsigned long ratelimit_time;
34af946a 6132static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6133
6134int ata_ratelimit(void)
6135{
6136 int rc;
6137 unsigned long flags;
6138
6139 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6140
6141 if (time_after(jiffies, ratelimit_time)) {
6142 rc = 1;
6143 ratelimit_time = jiffies + (HZ/5);
6144 } else
6145 rc = 0;
6146
6147 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6148
6149 return rc;
6150}
6151
c22daff4
TH
6152/**
6153 * ata_wait_register - wait until register value changes
6154 * @reg: IO-mapped register
6155 * @mask: Mask to apply to read register value
6156 * @val: Wait condition
6157 * @interval_msec: polling interval in milliseconds
6158 * @timeout_msec: timeout in milliseconds
6159 *
6160 * Waiting for some bits of register to change is a common
6161 * operation for ATA controllers. This function reads 32bit LE
6162 * IO-mapped register @reg and tests for the following condition.
6163 *
6164 * (*@reg & mask) != val
6165 *
6166 * If the condition is met, it returns; otherwise, the process is
6167 * repeated after @interval_msec until timeout.
6168 *
6169 * LOCKING:
6170 * Kernel thread context (may sleep)
6171 *
6172 * RETURNS:
6173 * The final register value.
6174 */
6175u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6176 unsigned long interval_msec,
6177 unsigned long timeout_msec)
6178{
6179 unsigned long timeout;
6180 u32 tmp;
6181
6182 tmp = ioread32(reg);
6183
6184 /* Calculate timeout _after_ the first read to make sure
6185 * preceding writes reach the controller before starting to
6186 * eat away the timeout.
6187 */
6188 timeout = jiffies + (timeout_msec * HZ) / 1000;
6189
6190 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6191 msleep(interval_msec);
6192 tmp = ioread32(reg);
6193 }
6194
6195 return tmp;
6196}
6197
dd5b06c4
TH
6198/*
6199 * Dummy port_ops
6200 */
6201static void ata_dummy_noret(struct ata_port *ap) { }
6202static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6203static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6204
6205static u8 ata_dummy_check_status(struct ata_port *ap)
6206{
6207 return ATA_DRDY;
6208}
6209
6210static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6211{
6212 return AC_ERR_SYSTEM;
6213}
6214
6215const struct ata_port_operations ata_dummy_port_ops = {
6216 .port_disable = ata_port_disable,
6217 .check_status = ata_dummy_check_status,
6218 .check_altstatus = ata_dummy_check_status,
6219 .dev_select = ata_noop_dev_select,
6220 .qc_prep = ata_noop_qc_prep,
6221 .qc_issue = ata_dummy_qc_issue,
6222 .freeze = ata_dummy_noret,
6223 .thaw = ata_dummy_noret,
6224 .error_handler = ata_dummy_noret,
6225 .post_internal_cmd = ata_dummy_qc_noret,
6226 .irq_clear = ata_dummy_noret,
6227 .port_start = ata_dummy_ret0,
6228 .port_stop = ata_dummy_noret,
6229};
6230
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

/* SATA PHY debounce timing parameter sets */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);

/* host registration / teardown */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_device_add);
EXPORT_SYMBOL_GPL(ata_host_detach);

/* queued-command and scatterlist handling */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);

/* taskfile and register-level helpers */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);

/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);

/* probe, reset and classification */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_port_queue_task);

/* SCSI midlayer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);

/* SCR access and link online/offline tests */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);

/* host-level power management */
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);

/* IDENTIFY-data helpers */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* transfer-mode timing computation */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
EXPORT_SYMBOL_GPL(ata_scsi_device_resume);

/* error handling */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);

/* IRQ handling helpers */
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);