/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION     "2.20"  /* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
        fis[0] = 0x27;  /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7);        /* Port multiplier number,
                                                   bit 7 indicates Command FIS */
        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}
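
/*
 * Illustrative sketch, not part of the driver logic: for a taskfile
 * carrying ATA_CMD_READ_EXT (0x25) with pmp 0, the conversion above
 * yields a 20-byte H2D Register FIS whose header bytes are:
 *
 *      fis[0] == 0x27          FIS type: Register - Host to Device
 *      fis[1] == 0x80          C bit set, PM port 0
 *      fis[2] == 0x25          tf->command
 *      fis[3] == tf->feature
 *
 * Bytes 4-13 carry the LBA/device/count fields in the order written
 * out above; byte 14 and bytes 16-19 are reserved and stay zero.
 */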

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command     = fis[2];       /* status */
        tf->feature     = fis[3];       /* error */

        tf->lbal        = fis[4];
        tf->lbam        = fis[5];
        tf->lbah        = fis[6];
        tf->device      = fis[7];

        tf->hob_lbal    = fis[8];
        tf->hob_lbam    = fis[9];
        tf->hob_lbah    = fis[10];

        tf->nsect       = fis[12];
        tf->hob_nsect   = fis[13];
}

static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}
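
/*
 * Worked example (illustration only): ata_rw_cmds[] is indexed as
 * base + fua + lba48 + write, with base 0 for PIO multi, 8 for plain
 * PIO and 16 for DMA.  A DMA FUA write to an LBA48 device gives
 *
 *      index = 16 + 4 + 2 + 1 = 23  ->  ATA_CMD_WRITE_FUA_EXT
 *
 * while a PIO read on a device with multi_count set gives
 *
 *      index = 0 + 0 + 0 + 0 = 0    ->  ATA_CMD_READ_MULTI
 *
 * Zero slots mark invalid combinations (e.g. FUA without LBA48), for
 * which ata_rwcmd_protocol() returns -1.
 */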

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
        u64 block = 0;

        if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
                        block |= tf->hob_lbal << 24;
                } else
                        block |= (tf->device & 0xf) << 24;

                block |= tf->lbah << 16;
                block |= tf->lbam << 8;
                block |= tf->lbal;
        } else {
                u32 cyl, head, sect;

                cyl = tf->lbam | (tf->lbah << 8);
                head = tf->device & 0xf;
                sect = tf->lbal;

                block = (cyl * dev->heads + head) * dev->sectors + sect;
        }

        return block;
}
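
/*
 * Worked CHS example (illustration only): with dev->heads == 16 and
 * dev->sectors == 63, a taskfile addressing cylinder 100, head 3,
 * sector 11 decodes as
 *
 *      block = (100 * 16 + 3) * 63 + 11 = 101000
 *
 * per the formula above.  In the LBA48 case the six address bytes are
 * simply reassembled into one 48-bit block number.
 */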

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                    u64 block, u32 n_block, unsigned int tf_flags,
                    unsigned int tag)
{
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->flags |= tf_flags;

        if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
                /* yay, NCQ */
                if (!lba_48_ok(block, n_block))
                        return -ERANGE;

                tf->protocol = ATA_PROT_NCQ;
                tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

                if (tf->flags & ATA_TFLAG_WRITE)
                        tf->command = ATA_CMD_FPDMA_WRITE;
                else
                        tf->command = ATA_CMD_FPDMA_READ;

                tf->nsect = tag << 3;
                tf->hob_feature = (n_block >> 8) & 0xff;
                tf->feature = n_block & 0xff;

                tf->hob_lbah = (block >> 40) & 0xff;
                tf->hob_lbam = (block >> 32) & 0xff;
                tf->hob_lbal = (block >> 24) & 0xff;
                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device = 1 << 6;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;

                if (lba_28_ok(block, n_block)) {
                        /* use LBA28 */
                        tf->device |= (block >> 24) & 0xf;
                } else if (lba_48_ok(block, n_block)) {
                        if (!(dev->flags & ATA_DFLAG_LBA48))
                                return -ERANGE;

                        /* use LBA48 */
                        tf->flags |= ATA_TFLAG_LBA48;

                        tf->hob_nsect = (n_block >> 8) & 0xff;

                        tf->hob_lbah = (block >> 40) & 0xff;
                        tf->hob_lbam = (block >> 32) & 0xff;
                        tf->hob_lbal = (block >> 24) & 0xff;
                } else
                        /* request too large even for LBA48 */
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                tf->nsect = n_block & 0xff;

                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device |= ATA_LBA;
        } else {
                /* CHS */
                u32 sect, head, cyl, track;

                /* The request -may- be too large for CHS addressing. */
                if (!lba_28_ok(block, n_block))
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                /* Convert LBA to CHS */
                track = (u32)block / dev->sectors;
                cyl   = track / dev->heads;
                head  = track % dev->heads;
                sect  = (u32)block % dev->sectors + 1;

                DPRINTK("block %u track %u cyl %u head %u sect %u\n",
                        (u32)block, track, cyl, head, sect);

                /* Check whether the converted CHS can fit.
                   Cylinder: 0-65535
                   Head: 0-15
                   Sector: 1-255 */
                if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
                        return -ERANGE;

                tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device |= head;
        }

        return 0;
}
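
/*
 * Usage sketch (hypothetical values, illustration only): building a
 * taskfile for a 16-block FUA write at LBA 0x12345678 on a non-NCQ
 * LBA48 device might look like
 *
 *      struct ata_taskfile tf;
 *
 *      ata_tf_init(dev, &tf);
 *      if (ata_build_rw_tf(&tf, dev, 0x12345678ULL, 16,
 *                          ATA_TFLAG_WRITE | ATA_TFLAG_FUA,
 *                          ATA_TAG_INTERNAL))
 *              return;         // -ERANGE or -EINVAL
 *
 * which selects ATA_CMD_WRITE_FUA_EXT via ata_rwcmd_protocol() and
 * splits the LBA across lbal/lbam/lbah and their hob_* counterparts.
 */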

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
                ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
                ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
                                unsigned int *pio_mask,
                                unsigned int *mwdma_mask,
                                unsigned int *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
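
/*
 * Property worth noting (illustration only): pack and unpack are
 * inverses as long as each input fits in its field, e.g.
 *
 *      unsigned int xfer_mask, pio, mwdma, udma;
 *
 *      xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *      ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
 *
 * after which pio == 0x1f, mwdma == 0x07 and udma == 0x3f again, so a
 * packed mask can be carried around as one unsigned int and split
 * back out at the point of use.
 */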

static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return 1 << (ent->shift + xfer_mode - ent->base);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}
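
/*
 * Relationship sketch (illustration only): the three helpers above
 * stay consistent with each other through ata_xfer_tbl.  For any
 * supported mode, e.g. XFER_UDMA_5,
 *
 *      ata_xfer_mask2mode(ata_xfer_mode2mask(XFER_UDMA_5)) == XFER_UDMA_5
 *      ata_xfer_mode2shift(XFER_UDMA_5) == ATA_SHIFT_UDMA
 *
 * and mask2mode looks only at the highest set bit, so a mask covering
 * UDMA0-5 also maps to XFER_UDMA_5.
 */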

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "PIO5",
                "PIO6",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "MWDMA3",
                "MWDMA4",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
        };

        if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
                return "<unknown>";
        return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
        if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
                ata_dev_printk(dev, KERN_WARNING, "disabled\n");
                ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
                                        ATA_DNXFER_QUIET);
                dev->class++;
        }
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        iowrite8(0xaa, ioaddr->nsect_addr);
        iowrite8(0x55, ioaddr->lbal_addr);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        nsect = ioread8(ioaddr->nsect_addr);
        lbal = ioread8(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * so we only check those.  It's sufficient for uniqueness.
         */

        if (((tf->lbam == 0) && (tf->lbah == 0)) ||
            ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
            ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, device);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags: if master then continue and warn later */
        if (err == 0 && device == 0)
                /* diagnostic fail : do nothing _YET_ */
                ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
        else if (err == 1)
                /* do nothing */ ;
        else if ((device == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN)
                return ATA_DEV_NONE;
        if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                return ATA_DEV_NONE;
        return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}
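
/*
 * Byte-order note (illustration only): each identify word stores two
 * ASCII characters with the first character in the high byte.  If
 * id[ofs] == 0x5354 ('S' << 8 | 'T'), the loop above emits 'S' then
 * 'T', so the string comes out in reading order on both little- and
 * big-endian hosts (the id buffer having already been fixed up by
 * swap_buf_le16()).
 */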

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}
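
/*
 * Usage sketch, mirroring the use in ata_dev_configure() below:
 * pulling the model string out of an identify page for printing might
 * look like
 *
 *      unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *      ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
 *
 * A field padded as "ST3120026AS     " (hypothetical model) comes back
 * as the C string "ST3120026AS" with the trailing spaces trimmed.
 */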

static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, 100);
                else
                        return ata_id_u32(id, 60);
        } else {
                if (ata_id_current_chs_valid(id))
                        return ata_id_u32(id, 57);
                else
                        return id[1] * id[3] * id[6];
        }
}

/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode.  This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
        unsigned int mask;
        u8 mode;

        /* Pack the DMA modes */
        mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
        if (dev->id[53] & 0x04)
                mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

        /* Select the mode in use */
        mode = ata_xfer_mask2mode(mask);

        if (mode != 0) {
                ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
                               ata_mode_string(mask));
        } else {
                /* SWDMA perhaps ? */
                mode = unknown;
                ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
        }

        /* Configure the device reporting */
        dev->xfer_mode = mode;
        dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual work.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        iowrite8(tmp, ap->ioaddr.device_addr);
        ata_pause(ap);  /* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
        if (ata_msg_probe(ap))
                ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
                                "device %u, wait %u\n", device, wait);

        if (wait)
                ata_wait_idle(ap);

        ap->ops->dev_select(ap, device);

        if (wait) {
                if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
                        msleep(150);
                ata_wait_idle(ap);
        }
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x  "
                "53==0x%04x  "
                "63==0x%04x  "
                "64==0x%04x  "
                "75==0x%04x  \n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x  "
                "81==0x%04x  "
                "82==0x%04x  "
                "83==0x%04x  "
                "84==0x%04x  \n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x  "
                "93==0x%04x\n",
                id[88],
                id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device.  This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
        unsigned int pio_mask, mwdma_mask, udma_mask;

        /* Usual case.  Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum. Turn it into
                 * a mask.
                 */
                u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
                if (mode < 5)   /* Valid PIO range */
                        pio_mask = (2 << mode) - 1;
                else
                        pio_mask = 1;

                /* But wait.. there's more. Design your standards by
                 * committee and you too can get a free iordy field to
                 * process. However it's the speeds not the modes that
                 * are supported... Note drivers using the timing API
                 * will get this right anyway
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        if (ata_id_is_cfa(id)) {
                /*
                 * Process compact flash extended modes
                 */
                int pio = id[163] & 0x7;
                int dma = (id[163] >> 3) & 7;

                if (pio)
                        pio_mask |= (1 << 5);
                if (pio > 1)
                        pio_mask |= (1 << 6);
                if (dma)
                        mwdma_mask |= (1 << 3);
                if (dma > 1)
                        mwdma_mask |= (1 << 4);
        }

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
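
/*
 * Worked example (illustration only): for a modern drive with word 53
 * bit 1 set and id[ATA_ID_PIO_MODES] == 0x0003 (PIO3 and PIO4
 * supported), the code above computes
 *
 *      pio_mask = (0x03 << 3) | 0x7 = 0x1f     (PIO0-PIO4)
 *
 * and, if word 53 bit 2 is also set with id[ATA_ID_UDMA_MODES] ==
 * 0x3f, udma_mask covers UDMA0-5; the three masks are then packed by
 * ata_pack_xfermask() into the single value callers work with.
 */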

/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user(low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
                         unsigned long delay)
{
        int rc;

        if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
                return;

        PREPARE_DELAYED_WORK(&ap->port_task, fn);
        ap->port_task_data = data;

        rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
        unsigned long flags;

        DPRINTK("ENTER\n");

        spin_lock_irqsave(ap->lock, flags);
        ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("flush #1\n");
        flush_workqueue(ata_wq);

        /*
         * At this point, if a task is running, it's guaranteed to see
         * the FLUSH flag; thus, it will never queue pio tasks again.
         * Cancel and flush.
         */
        if (!cancel_delayed_work(&ap->port_task)) {
                if (ata_msg_ctl(ap))
                        ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
                                        __FUNCTION__);
                flush_workqueue(ata_wq);
        }

        spin_lock_irqsave(ap->lock, flags);
        ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(ap->lock, flags);

        if (ata_msg_ctl(ap))
                ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
                              struct ata_taskfile *tf, const u8 *cdb,
                              int dma_dir, struct scatterlist *sg,
                              unsigned int n_elem)
{
        struct ata_port *ap = dev->ap;
        u8 command = tf->command;
        struct ata_queued_cmd *qc;
        unsigned int tag, preempted_tag;
        u32 preempted_sactive, preempted_qc_active;
        DECLARE_COMPLETION_ONSTACK(wait);
        unsigned long flags;
        unsigned int err_mask;
        int rc;

        spin_lock_irqsave(ap->lock, flags);

        /* no internal command while frozen */
        if (ap->pflags & ATA_PFLAG_FROZEN) {
                spin_unlock_irqrestore(ap->lock, flags);
                return AC_ERR_SYSTEM;
        }

        /* initialize internal qc */

        /* XXX: Tag 0 is used for drivers with legacy EH as some
         * drivers choke if any other tag is given.  This breaks
         * ata_tag_internal() test for those drivers.  Don't use new
         * EH stuff without converting to it.
         */
        if (ap->ops->error_handler)
                tag = ATA_TAG_INTERNAL;
        else
                tag = 0;

        if (test_and_set_bit(tag, &ap->qc_allocated))
                BUG();
        qc = __ata_qc_from_tag(ap, tag);

        qc->tag = tag;
        qc->scsicmd = NULL;
        qc->ap = ap;
        qc->dev = dev;
        ata_qc_reinit(qc);

        preempted_tag = ap->active_tag;
        preempted_sactive = ap->sactive;
        preempted_qc_active = ap->qc_active;
        ap->active_tag = ATA_TAG_POISON;
        ap->sactive = 0;
        ap->qc_active = 0;

        /* prepare & issue qc */
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                unsigned int i, buflen = 0;

                for (i = 0; i < n_elem; i++)
                        buflen += sg[i].length;

                ata_sg_init(qc, sg, n_elem);
                qc->nbytes = buflen;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(ap->lock, flags);

        rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

        ata_port_flush_task(ap);

        if (!rc) {
                spin_lock_irqsave(ap->lock, flags);

                /* We're racing with irq here.  If we lose, the
                 * following test prevents us from completing the qc
                 * twice.  If we win, the port is frozen and will be
                 * cleaned up by ->post_internal_cmd().
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask |= AC_ERR_TIMEOUT;

                        if (ap->ops->error_handler)
                                ata_port_freeze(ap);
                        else
                                ata_qc_complete(qc);

                        if (ata_msg_warn(ap))
                                ata_dev_printk(dev, KERN_WARNING,
                                               "qc timeout (cmd 0x%x)\n", command);
                }

                spin_unlock_irqrestore(ap->lock, flags);
        }

        /* do post_internal_cmd */
        if (ap->ops->post_internal_cmd)
                ap->ops->post_internal_cmd(qc);

        if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
                if (ata_msg_warn(ap))
                        ata_dev_printk(dev, KERN_WARNING,
                                       "zero err_mask for failed "
                                       "internal command, assuming AC_ERR_OTHER\n");
                qc->err_mask |= AC_ERR_OTHER;
        }

        /* finish up */
        spin_lock_irqsave(ap->lock, flags);

        *tf = qc->result_tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);
        ap->active_tag = preempted_tag;
        ap->sactive = preempted_sactive;
        ap->qc_active = preempted_qc_active;

        /* XXX - Some LLDDs (sata_mv) disable port on command failure.
         * Until those drivers are fixed, we detect the condition
         * here, fail the command with AC_ERR_SYSTEM and reenable the
         * port.
         *
         * Note that this doesn't change any behavior as internal
         * command failure results in disabling the device in the
         * higher layer for LLDDs without new reset/EH callbacks.
         *
         * Kill the following code as soon as those drivers are fixed.
         */
        if (ap->flags & ATA_FLAG_DISABLED) {
                err_mask |= AC_ERR_SYSTEM;
                ata_port_probe(ap);
        }

        spin_unlock_irqrestore(ap->lock, flags);

        return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
                           struct ata_taskfile *tf, const u8 *cdb,
                           int dma_dir, void *buf, unsigned int buflen)
{
        struct scatterlist *psg = NULL, sg;
        unsigned int n_elem = 0;

        if (dma_dir != DMA_NONE) {
                WARN_ON(!buf);
                sg_init_one(&sg, buf, buflen);
                psg = &sg;
                n_elem++;
        }

        return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = cmd;
        tf.flags |= ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_NODATA;

        return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        int pio;
        int speed = adev->pio_mode - XFER_PIO_0;

        if (speed < 2)
                return 0;
        if (speed > 2)
                return 1;

        /* If we have no drive specific rule, then PIO 2 is non IORDY */

        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows non IORDY ? */
                if (pio) {
                        /* This is cycle times not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240nS per cycle */
                                return 1;
                        return 0;
                }
        }
        return 0;
}
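
/*
 * Worked example (illustration only): a device running at XFER_PIO_2
 * (speed == 2) that reports a minimum non-IORDY cycle time of 383ns
 * in id[ATA_ID_EIDE_PIO] takes the pio > 240 branch above and returns
 * 1; the same device reporting 240ns or less would run PIO2 without
 * IORDY.  Anything above PIO2 always needs IORDY, anything below
 * never does.
 */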

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
                    unsigned int flags, u16 *id)
{
        struct ata_port *ap = dev->ap;
        unsigned int class = *p_class;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        const char *reason;
        int rc;

        if (ata_msg_ctl(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

        ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
        ata_tf_init(dev, &tf);

        switch (class) {
        case ATA_DEV_ATA:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        /* Some devices choke if TF registers contain garbage.  Make
         * sure those are properly initialized.
         */
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

        /* Device presence detection is unreliable on some
         * controllers.  Always poll IDENTIFY if available.
         */
        tf.flags |= ATA_TFLAG_POLLING;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     id, sizeof(id[0]) * ATA_ID_WORDS);
        if (err_mask) {
                if (err_mask & AC_ERR_NODEV_HINT) {
                        DPRINTK("ata%u.%d: NODEV after polling detection\n",
                                ap->print_id, dev->devno);
                        return -ENOENT;
                }

                rc = -EIO;
                reason = "I/O error";
                goto err_out;
        }

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        rc = -EINVAL;
        reason = "device reports illegal type";

        if (class == ATA_DEV_ATA) {
                if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
                        goto err_out;
        } else {
                if (ata_id_is_ata(id))
                        goto err_out;
        }

        if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY
                 * INITIALIZE DEVICE PARAMETERS
                 * anything else..
                 * Some drives were very specific about that exact sequence.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(dev, id[3], id[6]);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed.  reread the identify device info.
                         */
                        flags &= ~ATA_READID_POSTRESET;
                        goto retry;
                }
        }

        *p_class = class;

        return 0;

 err_out:
        if (ata_msg_warn(ap))
                ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
                               "(%s, err_mask=0x%x)\n", reason, err_mask);
        return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
        return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
                               char *desc, size_t desc_sz)
{
        struct ata_port *ap = dev->ap;
        int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

        if (!ata_id_has_ncq(dev->id)) {
                desc[0] = '\0';
                return;
        }
        if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
                snprintf(desc, desc_sz, "NCQ (not used)");
                return;
        }
        if (ap->flags & ATA_FLAG_NCQ) {
                hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
                dev->flags |= ATA_DFLAG_NCQ;
        }

        if (hdepth >= ddepth)
                snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
        else
                snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
        int i;

        if (ap->scsi_host) {
                unsigned int len = 0;

                for (i = 0; i < ATA_MAX_DEVICES; i++)
                        len = max(len, ap->device[i].cdb_len);

                ap->scsi_host->max_cmd_len = len;
        }
}

/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
        struct ata_port *ap = dev->ap;
        int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
        const u16 *id = dev->id;
        unsigned int xfer_mask;
        char revbuf[7];         /* XYZ-99\0 */
        char fwrevbuf[ATA_ID_FW_REV_LEN+1];
        char modelbuf[ATA_ID_PROD_LEN+1];
        int rc;

        if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
                ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
                               __FUNCTION__);
                return 0;
        }

        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

        /* set _SDD */
        rc = ata_acpi_push_id(ap, dev->devno);
        if (rc) {
                ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
                        rc);
        }

        /* retrieve and execute the ATA task file of _GTF */
        ata_acpi_exec_tfs(ap);

        /* print device capabilities */
        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG,
                               "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
                               "85:%04x 86:%04x 87:%04x 88:%04x\n",
                               __FUNCTION__,
                               id[49], id[82], id[83], id[84],
                               id[85], id[86], id[87], id[88]);

        /* initialize to-be-configured parameters */
        dev->flags &= ~ATA_DFLAG_CFG_MASK;
        dev->max_sectors = 0;
        dev->cdb_len = 0;
        dev->n_sectors = 0;
        dev->cylinders = 0;
        dev->heads = 0;
        dev->sectors = 0;

        /*
         * common ATA, ATAPI feature tests
         */

        /* find max transfer mode; for printk only */
        xfer_mask = ata_id_xfermask(id);

        if (ata_msg_probe(ap))
                ata_dump_id(id);

        /* ATA-specific feature tests */
        if (dev->class == ATA_DEV_ATA) {
                if (ata_id_is_cfa(id)) {
                        if (id[162] & 1) /* CPRM may make this media unusable */
                                ata_dev_printk(dev, KERN_WARNING,
                                               "supports DRM functions and may "
                                               "not be fully accessible.\n");
                        snprintf(revbuf, 7, "CFA");
                }
                else
                        snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

                dev->n_sectors = ata_id_n_sectors(id);

                /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
                ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
                                sizeof(fwrevbuf));

                ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
                                sizeof(modelbuf));

                if (dev->id[59] & 0x100)
                        dev->multi_count = dev->id[59] & 0xff;

                if (ata_id_has_lba(id)) {
                        const char *lba_desc;
                        char ncq_desc[20];

                        lba_desc = "LBA";
                        dev->flags |= ATA_DFLAG_LBA;
                        if (ata_id_has_lba48(id)) {
                                dev->flags |= ATA_DFLAG_LBA48;
                                lba_desc = "LBA48";

                                if (dev->n_sectors >= (1UL << 28) &&
                                    ata_id_has_flush_ext(id))
                                        dev->flags |= ATA_DFLAG_FLUSH_EXT;
                        }

                        /* config NCQ */
                        ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

                        /* print device info to dmesg */
                        if (ata_msg_drv(ap) && print_info) {
                                ata_dev_printk(dev, KERN_INFO,
                                        "%s: %s, %s, max %s\n",
                                        revbuf, modelbuf, fwrevbuf,
                                        ata_mode_string(xfer_mask));
                                ata_dev_printk(dev, KERN_INFO,
                                        "%Lu sectors, multi %u: %s %s\n",
                                        (unsigned long long)dev->n_sectors,
                                        dev->multi_count, lba_desc, ncq_desc);
                        }
                } else {
                        /* CHS */

                        /* Default translation */
                        dev->cylinders  = id[1];
                        dev->heads      = id[3];
                        dev->sectors    = id[6];

                        if (ata_id_current_chs_valid(id)) {
                                /* Current CHS translation is valid. */
                                dev->cylinders = id[54];
                                dev->heads     = id[55];
                                dev->sectors   = id[56];
                        }

                        /* print device info to dmesg */
                        if (ata_msg_drv(ap) && print_info) {
                                ata_dev_printk(dev, KERN_INFO,
                                        "%s: %s, %s, max %s\n",
                                        revbuf, modelbuf, fwrevbuf,
                                        ata_mode_string(xfer_mask));
                                ata_dev_printk(dev, KERN_INFO,
                                        "%Lu sectors, multi %u, CHS %u/%u/%u\n",
                                        (unsigned long long)dev->n_sectors,
                                        dev->multi_count, dev->cylinders,
                                        dev->heads, dev->sectors);
                        }
                }

                dev->cdb_len = 16;
        }

        /* ATAPI-specific feature tests */
        else if (dev->class == ATA_DEV_ATAPI) {
                char *cdb_intr_string = "";

                rc = atapi_cdb_len(id);
                if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
                        if (ata_msg_warn(ap))
                                ata_dev_printk(dev, KERN_WARNING,
                                               "unsupported CDB len\n");
                        rc = -EINVAL;
                        goto err_out_nosup;
                }
                dev->cdb_len = (unsigned int) rc;

                if (ata_id_cdb_intr(dev->id)) {
                        dev->flags |= ATA_DFLAG_CDB_INTR;
                        cdb_intr_string = ", CDB intr";
                }

                /* print device info to dmesg */
                if (ata_msg_drv(ap) && print_info)
                        ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
                                       ata_mode_string(xfer_mask),
                                       cdb_intr_string);
        }

        /* determine max_sectors */
        dev->max_sectors = ATA_MAX_SECTORS;
        if (dev->flags & ATA_DFLAG_LBA48)
                dev->max_sectors = ATA_MAX_SECTORS_LBA48;

        if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
                /* Let the user know. We don't want to disallow opens for
                   rescue purposes, or in case the vendor is just a blithering
                   idiot */
                if (print_info) {
                        ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
                        ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
                }
        }

        ata_set_port_max_cmd_len(ap);

        /* limit bridge transfers to udma5, 200 sectors */
        if (ata_dev_knobble(dev)) {
                if (ata_msg_drv(ap) && print_info)
                        ata_dev_printk(dev, KERN_INFO,
                                       "applying bridge limits\n");
                dev->udma_mask &= ATA_UDMA5;
                dev->max_sectors = ATA_MAX_SECTORS;
        }

        if (ap->ops->dev_config)
                ap->ops->dev_config(ap, dev);

        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
                        __FUNCTION__, ata_chk_status(ap));
        return 0;

err_out_nosup:
        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG,
                               "%s: EXIT, err\n", __FUNCTION__);
        return rc;
}

/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * Master ATA bus probing function.  Initiates a hardware-dependent
 * bus reset, then attempts to identify any devices found on
 * the bus.
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 * RETURNS:
 * Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
        unsigned int classes[ATA_MAX_DEVICES];
        int tries[ATA_MAX_DEVICES];
        int i, rc;
        struct ata_device *dev;

        ata_port_probe(ap);

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
        /* reset and determine device classes */
        ap->ops->phy_reset(ap);

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];

                if (!(ap->flags & ATA_FLAG_DISABLED) &&
                    dev->class != ATA_DEV_UNKNOWN)
                        classes[dev->devno] = dev->class;
                else
                        classes[dev->devno] = ATA_DEV_NONE;

                dev->class = ATA_DEV_UNKNOWN;
        }

        ata_port_probe(ap);

        /* after the reset the device state is PIO 0 and the controller
           state is undefined. Record the mode */

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                ap->device[i].pio_mode = XFER_PIO_0;

        /* read IDENTIFY page and configure devices. We have to do the identify
           specific sequence bass-ackwards so that PDIAG- is released by
           the slave device */

        for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
                dev = &ap->device[i];

                if (tries[i])
                        dev->class = classes[i];

                if (!ata_dev_enabled(dev))
                        continue;

                rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
                                     dev->id);
                if (rc)
                        goto fail;
        }

        /* After the identify sequence we can now set up the devices. We do
           this in the normal order so that the user doesn't get confused */

        for (i = 0; i < ATA_MAX_DEVICES; i++) {
                dev = &ap->device[i];
                if (!ata_dev_enabled(dev))
                        continue;

                ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
                rc = ata_dev_configure(dev);
                ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
                if (rc)
                        goto fail;
        }

        /* configure transfer mode */
        rc = ata_set_mode(ap, &dev);
        if (rc)
                goto fail;

        for (i = 0; i < ATA_MAX_DEVICES; i++)
                if (ata_dev_enabled(&ap->device[i]))
                        return 0;

        /* no device present, disable port */
        ata_port_disable(ap);
        ap->ops->port_disable(ap);
        return -ENODEV;

 fail:
        tries[dev->devno]--;

        switch (rc) {
        case -EINVAL:
                /* eeek, something went very wrong, give up */
                tries[dev->devno] = 0;
                break;

        case -ENODEV:
                /* give it just one more chance */
                tries[dev->devno] = min(tries[dev->devno], 1);
        case -EIO:
                if (tries[dev->devno] == 1) {
                        /* This is the last chance, better to slow
                         * down than lose it.
                         */
                        sata_down_spd_limit(ap);
                        ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
                }
        }

        if (!tries[dev->devno])
                ata_dev_disable(dev);

        goto retry;
}
1928
1929/**
0cba632b
JG
1930 * ata_port_probe - Mark port as enabled
1931 * @ap: Port for which we indicate enablement
1da177e4 1932 *
0cba632b
JG
1933 * Modify @ap data structure such that the system
1934 * thinks that the entire port is enabled.
1935 *
cca3974e 1936 * LOCKING: host lock, or some other form of
0cba632b 1937 * serialization.
1da177e4
LT
1938 */
1939
1940void ata_port_probe(struct ata_port *ap)
1941{
198e0fed 1942 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1943}
1944
3be680b7
TH
1945/**
1946 * sata_print_link_status - Print SATA link status
1947 * @ap: SATA port to printk link status about
1948 *
1949 * This function prints link speed and status of a SATA link.
1950 *
1951 * LOCKING:
1952 * None.
1953 */
1954static void sata_print_link_status(struct ata_port *ap)
1955{
6d5f9732 1956 u32 sstatus, scontrol, tmp;
3be680b7 1957
81952c54 1958 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1959 return;
81952c54 1960 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1961
81952c54 1962 if (ata_port_online(ap)) {
3be680b7 1963 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1964 ata_port_printk(ap, KERN_INFO,
1965 "SATA link up %s (SStatus %X SControl %X)\n",
1966 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1967 } else {
f15a1daf
TH
1968 ata_port_printk(ap, KERN_INFO,
1969 "SATA link down (SStatus %X SControl %X)\n",
1970 sstatus, scontrol);
3be680b7
TH
1971 }
1972}
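
/*
 * Illustrative note (not part of libata): decoding the SStatus value
 * printed above.  Bits 3:0 are DET and bits 7:4 are SPD, so a raw
 * SStatus of 0x123, for example, means DET=3 (device present, phy
 * communication established) and SPD=2 (Gen2), which
 * sata_spd_string() renders as "3.0 Gbps":
 *
 *	u32 det = sstatus & 0xf;	(0x3: link up)
 *	u32 spd = (sstatus >> 4) & 0xf;	(0x2: Gen2, 3.0 Gbps)
 */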

/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 *	ata_dev_pair - return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned
 */

struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
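
/*
 * Usage sketch (illustrative, not part of libata): PATA timing code
 * can use ata_dev_pair() to see whether both devices share the cable
 * before programming timings that must satisfy master and slave
 * alike, e.g.:
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *
 *	if (pair)
 *		...	(both devices enabled; shared-cable limits,
 *			 such as the CFA rule in ata_dev_xfermask()
 *			 below, apply)
 */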

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}

/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
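
/*
 * Worked example (illustrative, not part of libata): assume
 * ap->sata_spd_limit is 0x3 (Gen1 and Gen2 allowed) and the link is
 * currently up at SPD=2 (Gen2).  Then
 *
 *	mask = 0x3; highbit = fls(0x3) - 1 = 1; mask &= ~(1 << 1) -> 0x1
 *	spd = 2; spd--; mask &= (1 << 1) - 1 -> 0x1
 *
 * so sata_spd_limit becomes 0x1 and the next hardreset can bring the
 * link up at Gen1 (1.5 Gbps) at most.
 */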

static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}

/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@ap: Port in question
 *
 *	Test whether the spd limit in SControl matches
 *	@ap->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}

/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
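
/*
 * Usage sketch (illustrative, not part of libata): the three helpers
 * above cooperate during error recovery.  A caller typically lowers
 * the limit first and then forces a hardreset when SControl must be
 * rewritten, roughly:
 *
 *	if (sata_down_spd_limit(ap) == 0 && sata_set_spd_needed(ap))
 *		...	(schedule a hardreset; sata_port_hardreset()
 *			 below calls sata_set_spd() to apply the limit)
 */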

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
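
/*
 * Worked example (illustrative; the unit convention is inferred from
 * PATA LLDD callers, treat it as an assumption): T and UT are clock
 * periods in picoseconds, e.g. T = 1000000000 / 33333 = 30000 ps for
 * a 33 MHz bus clock given in kHz.  Quantizing the 70 ns PIO_0 setup
 * time from the table above then gives
 *
 *	EZ(70 * 1000, 30000) = ENOUGH(70000, 30000)
 *			     = (70000 - 1) / 30000 + 1 = 3 clocks
 *
 * i.e. a round-up division, where EZ() additionally maps a zero
 * (unused) field to zero clocks instead of one.
 */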

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T. and some other commands.  We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO
	 * timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	return 0;
}
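
/*
 * Usage sketch (illustrative only, with hypothetical register writes;
 * simplified from what a real PATA LLDD does in its ->set_piomode()
 * hook): compute the quantized timings for the device's current PIO
 * mode against a 30000 ps (33 MHz) bus clock, then program them into
 * chipset-specific registers.
 *
 *	static void example_set_piomode(struct ata_port *ap,
 *					struct ata_device *adev)
 *	{
 *		struct ata_timing t;
 *
 *		if (ata_timing_compute(adev, adev->pio_mode, &t,
 *				       30000, 30000) < 0)
 *			return;
 *		...	(write t.setup, t.active and t.recover, now in
 *			 bus clocks, into the controller's timing
 *			 registers)
 *	}
 */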

/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}

static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_ready(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status.  If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

	/* step 5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);
 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* Spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2ms rule and then waits for ready.
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return 0;

	ata_bus_post_reset(ap, devmask);

	return 0;
}

/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		if (ata_bus_softreset(ap, devmask))
			goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}

/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  The timeout constrains the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until the timeout and then returns 0 if DET is stable at 1.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = params[1] * HZ / 1000;
	unsigned long timeout = jiffies + params[2] * HZ / 1000;
	unsigned long last_jiffies;
	u32 last, cur;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, timeout))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check timeout */
		if (time_after(jiffies, timeout))
			return -EBUSY;
	}
}
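
/*
 * Worked example (illustrative; the parameter values below are made
 * up, not one of libata's predefined debounce tables): given
 * params = { 20, 100, 5000 }, this polls SStatus.DET every 20 ms,
 * declares the link stable once the value has held unchanged (and is
 * not 1) for 100 ms, and otherwise fails with -EBUSY after 5000 ms.
 * The exception is DET stuck at 1, for which 0 is returned once the
 * timeout passes, per the hot-unplug note above.
 */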

/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params);
}

static void ata_wait_spinup(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	unsigned long end, secs;
	int rc;

	/* first, debounce phy if SATA */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);

		/* if debounced successfully and offline, no need to wait */
		if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
			return;
	}

	/* okay, let's give the drive time to spin up */
	end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
	secs = ((end - jiffies) + HZ - 1) / HZ;

	if (time_after(jiffies, end))
		return;

	if (secs > 5)
		ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
				"(%lu secs)\n", secs);

	schedule_timeout_uninterruptible(end - jiffies);
}

/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *
 *	@ap is about to be reset.  Initialize it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume & hotplug spinup */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
	    (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
		ata_wait_spinup(ap);

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing);
		if (rc && rc != -EOPNOTSUPP) {
			/* phy resume failed */
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
			return rc;
		}
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	return 0;
}

/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@classes: resulting classes of attached devices
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0, err_mask;
	u8 err;

	DPRINTK("ENTER\n");

	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	err_mask = ata_bus_softreset(ap, devmask);
	if (err_mask) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
				err_mask);
		return -EIO;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}

/**
 *	sata_port_hardreset - reset port via SATA phy reset
 *	@ap: port to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
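
/*
 * Illustrative note (not part of libata): the SControl values written
 * above pack three 4-bit fields, DET (bits 3:0), SPD (bits 7:4) and
 * IPM (bits 11:8).  Writing 0x301 therefore means DET=1 (perform
 * interface initialization, i.e. COMRESET), SPD preserved from the
 * masked-in value, and IPM=3 (disable partial and slumber power
 * states); 0x304 sets DET=4, taking the phy offline while SControl is
 * reconfigured, and 0x300 releases DET to 0 so the link can
 * renegotiate.
 */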

/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (device not ready)\n");
		return -EIO;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}

/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}

/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
	u64 new_n_sectors;

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
	new_n_sectors = ata_id_n_sectors(new_id);

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)dev->n_sectors,
			       (unsigned long long)new_n_sectors);
		return 0;
	}

	return 1;
}

/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	u16 *id = (void *)dev->ap->sector_buf;
	int rc;

	if (!ata_dev_enabled(dev)) {
		rc = -ENODEV;
		goto fail;
	}

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		goto fail;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id)) {
		rc = -ENODEV;
		goto fail;
	}

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc == 0)
		return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}

struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist[] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840", NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM", NULL, ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor 6L250S0",	"BANC1G10",	ATA_HORKAGE_NONCQ },
	/* NCQ hard hangs device under heavier load, needs hard power cycle */
	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },

	/* Devices with NCQ limits */

	/* End Marker */
	{ }
};

unsigned long ata_device_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	while (ad->model_num) {
		if (!strcmp(ad->model_num, model_num)) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strcmp(ad->model_rev, model_rev))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
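
/*
 * Usage sketch (illustrative, not part of libata): callers test the
 * returned horkage mask for the quirk they care about, the way
 * ata_dma_blacklisted() below checks ATA_HORKAGE_NODMA:
 *
 *	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ)
 *		...	(leave NCQ disabled for this drive)
 */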

static int ata_dma_blacklisted(const struct ata_device *dev)
{
	/* We don't support polling DMA.
	 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
	 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
	 */
	if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
	    (dev->flags & ATA_DFLAG_CDB_INTR))
		return 1;
	return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
}

/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 */
	if (ap->cbl == ATA_CBL_PATA40)
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	/* Apply drive side cable rule.  Unknown or 80 pin cables reported
	 * host side are checked drive side as well.  Cases where we know a
	 * 40wire cable is used safely for 80 are not checked here.
	 */
	if (ata_drive_40wire(dev->id) &&
	    (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
		xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);

	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 * CFA Advanced TrueIDE timings are not allowed on a shared
	 * cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
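
/*
 * Illustrative note (bit positions are those of the ATA_SHIFT_*
 * constants in <linux/ata.h>; treat the exact values as an assumption
 * here): a packed xfer_mask keeps the PIO, MWDMA and UDMA fields side
 * by side, so the 40-wire cable rule above,
 *
 *	xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
 *
 * clears the mask bits for UDMA3 and above (0xF8 = bits 3-7) and
 * thereby caps a 40-conductor cable at UDMA2, i.e. UDMA/33.
 */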

/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}

/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255.  Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
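
/*
 * Worked example (illustrative, not part of libata): for a legacy CHS
 * geometry of 16 heads and 63 sectors per track, the taskfile built
 * above carries tf.nsect = 63 and tf.device |= (16 - 1) & 0x0f = 0x0f,
 * since the device register encodes the maximum head number rather
 * than the head count itself.
 */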
3548
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}

/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}

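/*
 * Worked example (illustrative, not from the original source): a single
 * 8 KiB segment at bus address 0x0000f000 crosses a 64 KiB boundary, so
 * the loop above emits two PRD entries:
 *
 *	PRD[0] = (0x0000f000, 0x1000)	first 4 KiB, up to the 64K line
 *	PRD[1] = (0x00010000, 0x1000)	remaining 4 KiB
 *
 * and ATA_PRD_EOT is OR'd into the flags of the last entry.
 */
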
/**
 *	ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 *	@qc: Metadata associated with taskfile to check
 *
 *	Allow low-level driver to filter ATA PACKET commands, returning
 *	a status indicating whether or not it is OK to use DMA for the
 *	supplied PACKET command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS: 0 when ATAPI DMA can be used
 *		 nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int rc = 0; /* Assume ATAPI DMA is OK by default */

	if (ap->ops->check_atapi_dma)
		rc = ap->ops->check_atapi_dma(qc);

	return rc;
}

/**
 *	ata_qc_prep - Prepare taskfile for submission
 *	@qc: Metadata associated with taskfile to be prepared
 *
 *	Prepare ATA taskfile for submission.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

/**
 *	ata_sg_init_one - Associate command with memory buffer
 *	@qc: Command to be associated
 *	@buf: Memory buffer
 *	@buflen: Length of memory buffer, in bytes.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a single memory buffer, @buf of byte length @buflen.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;

	sg_init_one(&qc->sgent, buf, buflen);
}

/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->flags |= ATA_QCFLAG_SG;
	qc->__sg = sg;
	qc->n_elem = n_elem;
	qc->orig_n_elem = n_elem;
}

/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}

/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}

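/*
 * Illustrative note (not from the original source): for an ATAPI read
 * whose last scatterlist element is 1022 bytes long, pad_len =
 * 1022 & 3 = 2, so the element is trimmed to 1020 bytes and the
 * per-tag 4-byte pad buffer is appended as an extra DMA segment; the
 * final 2 bytes are copied back out of the pad buffer later in
 * ata_sg_clean().
 */
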
/**
 *	swap_buf_le16 - swap halves of 16-bit words in place
 *	@buf: Buffer to swap
 *	@buf_words: Number of 16-bit words in buffer.
 *
 *	Swap halves of 16-bit words if needed to convert from
 *	little-endian byte order to native cpu byte order, or
 *	vice-versa.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}

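/*
 * Illustrative note (not from the original source): IDENTIFY DEVICE data
 * arrives as little-endian 16-bit words. On a big-endian CPU a word read
 * off the wire as 0x3412 becomes 0x1234 after swap_buf_le16(); on a
 * little-endian CPU the loop compiles away entirely.
 */
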
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}

/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}

/**
 *	ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 *	@qc: Command in progress
 *
 *	Transfer ATA_SECT_SIZE of data from/to the ATA device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	qc->curbytes += ATA_SECT_SIZE;
	qc->cursg_ofs += ATA_SECT_SIZE;

	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}

/**
 *	ata_pio_sectors - Transfer one or many 512-byte sectors.
 *	@qc: Command in progress
 *
 *	Transfer one or many ATA_SECT_SIZE chunks of data from/to the
 *	ATA device for the DRQ request.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);
}

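/*
 * Illustrative note (not from the original source): for a READ MULTIPLE
 * command with multi_count = 8, nbytes = 65536 and curbytes = 61440,
 * nsect = min((65536 - 61440) / 512, 8) = 8, so the final DRQ block
 * moves exactly the remaining eight sectors.
 */
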
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When the device has indicated its readiness to accept
 *	a CDB, this function is called. Send the CDB.
 *
 *	LOCKING:
 *	caller.
 */
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}

/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command in progress
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, pad with zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}

/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command in progress
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 *	ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 *	@ap: the target ata_port
 *	@qc: qc in progress
 *
 *	RETURNS:
 *	1 if ok in workqueue, 0 otherwise.
 */
static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (is_atapi_taskfile(&qc->tf) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

	ata_altstatus(ap); /* flush */
}

/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc in progress
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}

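/*
 * Illustrative trace (not from the original source): a polled PIO write
 * of one sector walks the FSM above as
 *
 *	HSM_ST_FIRST -> first data block written -> HSM_ST
 *	HSM_ST       -> status checked, last block sent -> HSM_ST_LAST
 *	HSM_ST_LAST  -> ata_ok(status) -> HSM_ST_IDLE, qc completed
 *
 * with ata_pio_task() re-polling between states while poll_next is 1.
 */
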
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}

/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port from which we request an available command structure
 *
 *	LOCKING:
 *	None.
 */
static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}

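/*
 * Illustrative note (not from the original source): tag allocation is a
 * bitmap scan. Assuming ATA_MAX_QUEUE is 32, with ap->qc_allocated = 0x7
 * tags 0-2 are busy, so test_and_set_bit() first succeeds for i = 3 and
 * the qc for tag 3 is handed out; the last tag is never scanned because
 * it is reserved for the internal command.
 */
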
/**
 *	ata_qc_new_init - Request an available ATA command, and initialize it
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 */
struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}

/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}

void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ)
		ap->sactive &= ~(1 << qc->tag);
	else
		ap->active_tag = ATA_TAG_POISON;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}

static void fill_result_tf(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	ap->ops->tf_read(ap, &qc->result_tf);
	qc->result_tf.flags = qc->tf.flags;
}

/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}

/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands. This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally. ap->qc_active and @qc_active are compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}

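/*
 * Worked example (illustrative, not from the original source): with
 * ap->qc_active = 0x0b (tags 0, 1 and 3 in flight) and a controller
 * reporting qc_active = 0x09, done_mask = 0x0b ^ 0x09 = 0x02, so only
 * tag 1 is completed. A "new" bit that was never active, e.g.
 * qc_active = 0x19, makes done_mask & qc_active nonzero and the
 * transition is rejected with -EINVAL.
 */
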
static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	switch (qc->tf.protocol) {
	case ATA_PROT_NCQ:
	case ATA_PROT_DMA:
	case ATA_PROT_ATAPI_DMA:
		return 1;

	case ATA_PROT_ATAPI:
	case ATA_PROT_PIO:
		if (ap->flags & ATA_FLAG_PIO_DMA)
			return 1;

		/* fall through */

	default:
		return 0;
	}

	/* never reached */
}

/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command for submission to the device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding. The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}

/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command. ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* Some controllers show flaky interrupt behavior after
	 * setting xfer mode. Use polling instead.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
		     qc->tf.feature == SETFEATURES_XFER) &&
	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->ops->bmdma_start(qc);	/* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	/* load tf registers */
		ap->ops->bmdma_setup(qc);	/* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command. Currently,
 *	only DMA interrupts are handled. All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */
inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices. Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */
irqreturn_t ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 *	sata_scr_valid - test whether SCRs are accessible
 *	@ap: ATA port to test SCR accessibility for
 *
 *	Test whether SCRs are accessible for @ap.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_port *ap)
{
	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
}

/**
 *	sata_scr_read - read SCR register of the specified port
 *	@ap: ATA port to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @ap into *@val. This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
{
	if (sata_scr_valid(ap)) {
		*val = ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}

/**
 *	sata_scr_write - write SCR register of the specified port
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @ap. This function is
 *	guaranteed to succeed if the cable type of the port is SATA
 *	and the port implements ->scr_write.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		return 0;
	}
	return -EOPNOTSUPP;
}

/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@ap: ATA port to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}

/**
 *	ata_port_online - test whether the given port is online
 *	@ap: ATA port to test
 *
 *	Test whether @ap is online. Note that this function returns 0
 *	if online status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port online status is available and online.
 */
int ata_port_online(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 *	ata_port_offline - test whether the given port is offline
 *	@ap: ATA port to test
 *
 *	Test whether @ap is offline. Note that this function returns
 *	0 if offline status of @ap cannot be obtained, so
 *	ata_port_online(ap) != !ata_port_offline(ap).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if the port offline status is available and offline.
 */
int ata_port_offline(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}

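/*
 * Illustrative note (not from the original source): the low nibble of
 * SStatus is the DET field. DET == 0x3 means a device is present and
 * PHY communication is established, so a typical SStatus of 0x123 reads
 * as online, while 0x000 (nothing attached) and 0x004 (PHY offline)
 * both read as offline; if SCRs are unreadable, both helpers return 0.
 */
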
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}

6ffa01d8 5362#ifdef CONFIG_PM
cca3974e
JG
5363static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5364 unsigned int action, unsigned int ehi_flags,
5365 int wait)
500530f6
TH
5366{
5367 unsigned long flags;
5368 int i, rc;
5369
cca3974e
JG
5370 for (i = 0; i < host->n_ports; i++) {
5371 struct ata_port *ap = host->ports[i];
500530f6
TH
5372
5373 /* Previous resume operation might still be in
5374 * progress. Wait for PM_PENDING to clear.
5375 */
5376 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5377 ata_port_wait_eh(ap);
5378 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5379 }
5380
5381 /* request PM ops to EH */
5382 spin_lock_irqsave(ap->lock, flags);
5383
5384 ap->pm_mesg = mesg;
5385 if (wait) {
5386 rc = 0;
5387 ap->pm_result = &rc;
5388 }
5389
5390 ap->pflags |= ATA_PFLAG_PM_PENDING;
5391 ap->eh_info.action |= action;
5392 ap->eh_info.flags |= ehi_flags;
5393
5394 ata_port_schedule_eh(ap);
5395
5396 spin_unlock_irqrestore(ap->lock, flags);
5397
5398 /* wait and check result */
5399 if (wait) {
5400 ata_port_wait_eh(ap);
5401 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5402 if (rc)
5403 return rc;
5404 }
5405 }
5406
5407 return 0;
5408}
5409
5410/**
cca3974e
JG
5411 * ata_host_suspend - suspend host
5412 * @host: host to suspend
500530f6
TH
5413 * @mesg: PM message
5414 *
cca3974e 5415 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
5416 * function requests EH to perform PM operations and waits for EH
5417 * to finish.
5418 *
5419 * LOCKING:
5420 * Kernel thread context (may sleep).
5421 *
5422 * RETURNS:
5423 * 0 on success, -errno on failure.
5424 */
cca3974e 5425int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6
TH
5426{
5427 int i, j, rc;
5428
cca3974e 5429 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
500530f6
TH
5430 if (rc)
5431 goto fail;
5432
5433 /* EH is quiescent now. Fail if we have any ready device.
5434 * This happens if hotplug occurs between completion of device
5435 * suspension and here.
5436 */
cca3974e
JG
5437 for (i = 0; i < host->n_ports; i++) {
5438 struct ata_port *ap = host->ports[i];
500530f6
TH
5439
5440 for (j = 0; j < ATA_MAX_DEVICES; j++) {
5441 struct ata_device *dev = &ap->device[j];
5442
5443 if (ata_dev_ready(dev)) {
5444 ata_port_printk(ap, KERN_WARNING,
5445 "suspend failed, device %d "
5446 "still active\n", dev->devno);
5447 rc = -EBUSY;
5448 goto fail;
5449 }
5450 }
5451 }
5452
cca3974e 5453 host->dev->power.power_state = mesg;
500530f6
TH
5454 return 0;
5455
5456 fail:
cca3974e 5457 ata_host_resume(host);
500530f6
TH
5458 return rc;
5459}
5460
5461/**
cca3974e
JG
5462 * ata_host_resume - resume host
5463 * @host: host to resume
500530f6 5464 *
cca3974e 5465 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5466 * function requests EH to perform PM operations and returns.
 5467 * Note that all resume operations are performed in parallel.
5468 *
5469 * LOCKING:
5470 * Kernel thread context (may sleep).
5471 */
cca3974e 5472void ata_host_resume(struct ata_host *host)
500530f6 5473{
cca3974e
JG
5474 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5475 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5476 host->dev->power.power_state = PMSG_ON;
500530f6 5477}
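/*
 * A minimal sketch (not from the original source) of how a low-level
 * driver might wire the two helpers above into its own bus PM hooks;
 * the "foo" names are assumptions.  The PCI variants near the end of
 * this file (ata_pci_device_suspend/resume) follow the same pattern.
 */
static int foo_suspend(struct device *dev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(dev);

	/* blocks until EH has quiesced every port */
	return ata_host_suspend(host, mesg);
}

static int foo_resume(struct device *dev)
{
	struct ata_host *host = dev_get_drvdata(dev);

	/* asynchronous: EH is scheduled and we return immediately */
	ata_host_resume(host);
	return 0;
}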
6ffa01d8 5478#endif
500530f6 5479
c893a3ae
RD
5480/**
5481 * ata_port_start - Set port up for dma.
5482 * @ap: Port to initialize
5483 *
5484 * Called just after data structures for each port are
5485 * initialized. Allocates space for PRD table.
5486 *
5487 * May be used as the port_start() entry in ata_port_operations.
5488 *
5489 * LOCKING:
5490 * Inherited from caller.
5491 */
f0d36efd 5492int ata_port_start(struct ata_port *ap)
1da177e4 5493{
2f1f610b 5494 struct device *dev = ap->dev;
6037d6bb 5495 int rc;
1da177e4 5496
f0d36efd
TH
5497 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5498 GFP_KERNEL);
1da177e4
LT
5499 if (!ap->prd)
5500 return -ENOMEM;
5501
6037d6bb 5502 rc = ata_pad_alloc(ap, dev);
f0d36efd 5503 if (rc)
6037d6bb 5504 return rc;
1da177e4 5505
f0d36efd
TH
5506 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5507 (unsigned long long)ap->prd_dma);
1da177e4
LT
5508 return 0;
5509}
5510
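/*
 * A hedged sketch of the common pattern for drivers that need per-port
 * private state: call ata_port_start() first, then hang driver data off
 * ap->private_data.  "foo_port_priv" and its contents are hypothetical.
 */
struct foo_port_priv {
	u32 saved_ctl;		/* hypothetical driver state */
};

static int foo_port_start(struct ata_port *ap)
{
	struct foo_port_priv *pp;
	int rc;

	rc = ata_port_start(ap);	/* PRD table and pad buffer */
	if (rc)
		return rc;

	pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	ap->private_data = pp;
	return 0;
}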
3ef3b43d
TH
5511/**
5512 * ata_dev_init - Initialize an ata_device structure
5513 * @dev: Device structure to initialize
5514 *
5515 * Initialize @dev in preparation for probing.
5516 *
5517 * LOCKING:
5518 * Inherited from caller.
5519 */
5520void ata_dev_init(struct ata_device *dev)
5521{
5522 struct ata_port *ap = dev->ap;
72fa4b74
TH
5523 unsigned long flags;
5524
5a04bf4b
TH
5525 /* SATA spd limit is bound to the first device */
5526 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5527
72fa4b74
TH
5528 /* High bits of dev->flags are used to record warm plug
5529 * requests which occur asynchronously. Synchronize using
cca3974e 5530 * host lock.
72fa4b74 5531 */
ba6a1308 5532 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5533 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5534 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5535
72fa4b74
TH
5536 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5537 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5538 dev->pio_mask = UINT_MAX;
5539 dev->mwdma_mask = UINT_MAX;
5540 dev->udma_mask = UINT_MAX;
5541}
5542
1da177e4 5543/**
155a8a9c 5544 * ata_port_init - Initialize an ata_port structure
1da177e4 5545 * @ap: Structure to initialize
cca3974e 5546 * @host: Collection of ports to which @ap belongs
1da177e4
LT
5547 * @ent: Probe information provided by low-level driver
5548 * @port_no: Port number associated with this ata_port
5549 *
155a8a9c 5550 * Initialize a new ata_port structure.
0cba632b 5551 *
1da177e4 5552 * LOCKING:
0cba632b 5553 * Inherited from caller.
1da177e4 5554 */
cca3974e 5555void ata_port_init(struct ata_port *ap, struct ata_host *host,
155a8a9c 5556 const struct ata_probe_ent *ent, unsigned int port_no)
1da177e4
LT
5557{
5558 unsigned int i;
5559
cca3974e 5560 ap->lock = &host->lock;
198e0fed 5561 ap->flags = ATA_FLAG_DISABLED;
44877b4e 5562 ap->print_id = ata_print_id++;
1da177e4 5563 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5564 ap->host = host;
2f1f610b 5565 ap->dev = ent->dev;
1da177e4 5566 ap->port_no = port_no;
fea63e38
TH
5567 if (port_no == 1 && ent->pinfo2) {
5568 ap->pio_mask = ent->pinfo2->pio_mask;
5569 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5570 ap->udma_mask = ent->pinfo2->udma_mask;
5571 ap->flags |= ent->pinfo2->flags;
5572 ap->ops = ent->pinfo2->port_ops;
5573 } else {
5574 ap->pio_mask = ent->pio_mask;
5575 ap->mwdma_mask = ent->mwdma_mask;
5576 ap->udma_mask = ent->udma_mask;
5577 ap->flags |= ent->port_flags;
5578 ap->ops = ent->port_ops;
5579 }
5a04bf4b 5580 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
5581 ap->active_tag = ATA_TAG_POISON;
5582 ap->last_ctl = 0xFF;
bd5d825c
BP
5583
5584#if defined(ATA_VERBOSE_DEBUG)
5585 /* turn on all debugging levels */
5586 ap->msg_enable = 0x00FF;
5587#elif defined(ATA_DEBUG)
5588 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5589#else
0dd4b21f 5590 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5591#endif
1da177e4 5592
65f27f38
DH
5593 INIT_DELAYED_WORK(&ap->port_task, NULL);
5594 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5595 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5596 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5597 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 5598
838df628
TH
5599 /* set cable type */
5600 ap->cbl = ATA_CBL_NONE;
5601 if (ap->flags & ATA_FLAG_SATA)
5602 ap->cbl = ATA_CBL_SATA;
5603
acf356b1
TH
5604 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5605 struct ata_device *dev = &ap->device[i];
38d87234 5606 dev->ap = ap;
72fa4b74 5607 dev->devno = i;
3ef3b43d 5608 ata_dev_init(dev);
acf356b1 5609 }
1da177e4
LT
5610
5611#ifdef ATA_IRQ_TRAP
5612 ap->stats.unhandled_irq = 1;
5613 ap->stats.idle_irq = 1;
5614#endif
5615
5616 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5617}
5618
155a8a9c 5619/**
4608c160
TH
5620 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5621 * @ap: ATA port to initialize SCSI host for
5622 * @shost: SCSI host associated with @ap
155a8a9c 5623 *
4608c160 5624 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5625 *
5626 * LOCKING:
5627 * Inherited from caller.
5628 */
4608c160 5629static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5630{
cca3974e 5631 ap->scsi_host = shost;
155a8a9c 5632
44877b4e 5633 shost->unique_id = ap->print_id;
4608c160
TH
5634 shost->max_id = 16;
5635 shost->max_lun = 1;
5636 shost->max_channel = 1;
5637 shost->max_cmd_len = 12;
155a8a9c
BK
5638}
5639
1da177e4 5640/**
996139f1 5641 * ata_port_add - Attach low-level ATA driver to system
1da177e4 5642 * @ent: Information provided by low-level driver
cca3974e 5643 * @host: Collection of ports to which we add
1da177e4
LT
5644 * @port_no: Port number associated with this host
5645 *
0cba632b
JG
5646 * Attach low-level ATA driver to system.
5647 *
1da177e4 5648 * LOCKING:
0cba632b 5649 * PCI/etc. bus probe sem.
1da177e4
LT
5650 *
5651 * RETURNS:
0cba632b 5652 * New ata_port on success, NULL on error.
1da177e4 5653 */
996139f1 5654static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
cca3974e 5655 struct ata_host *host,
1da177e4
LT
5656 unsigned int port_no)
5657{
996139f1 5658 struct Scsi_Host *shost;
1da177e4 5659 struct ata_port *ap;
1da177e4
LT
5660
5661 DPRINTK("ENTER\n");
aec5c3c1 5662
52783c5d 5663 if (!ent->port_ops->error_handler &&
cca3974e 5664 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
aec5c3c1
TH
5665 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5666 port_no);
5667 return NULL;
5668 }
5669
996139f1
JG
5670 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5671 if (!shost)
1da177e4
LT
5672 return NULL;
5673
996139f1 5674 shost->transportt = &ata_scsi_transport_template;
30afc84c 5675
996139f1 5676 ap = ata_shost_to_port(shost);
1da177e4 5677
cca3974e 5678 ata_port_init(ap, host, ent, port_no);
996139f1 5679 ata_port_init_shost(ap, shost);
1da177e4 5680
1da177e4 5681 return ap;
1da177e4
LT
5682}
5683
f0d36efd
TH
5684static void ata_host_release(struct device *gendev, void *res)
5685{
5686 struct ata_host *host = dev_get_drvdata(gendev);
5687 int i;
5688
5689 for (i = 0; i < host->n_ports; i++) {
5690 struct ata_port *ap = host->ports[i];
5691
1aa506e4 5692 if (ap && ap->ops->port_stop)
f0d36efd 5693 ap->ops->port_stop(ap);
f0d36efd
TH
5694 }
5695
5696 if (host->ops->host_stop)
5697 host->ops->host_stop(host);
1aa56cca 5698
1aa506e4
TH
5699 for (i = 0; i < host->n_ports; i++) {
5700 struct ata_port *ap = host->ports[i];
5701
5702 if (ap)
5703 scsi_host_put(ap->scsi_host);
5704
5705 host->ports[i] = NULL;
5706 }
5707
1aa56cca 5708 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
5709}
5710
b03732f0 5711/**
cca3974e
JG
 5712 * ata_host_init - Initialize a host struct
5713 * @host: host to initialize
5714 * @dev: device host is attached to
5715 * @flags: host flags
5716 * @ops: port_ops
b03732f0
BK
5717 *
5718 * LOCKING:
5719 * PCI/etc. bus probe sem.
5720 *
5721 */
5722
cca3974e
JG
5723void ata_host_init(struct ata_host *host, struct device *dev,
5724 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5725{
cca3974e
JG
5726 spin_lock_init(&host->lock);
5727 host->dev = dev;
5728 host->flags = flags;
5729 host->ops = ops;
b03732f0
BK
5730}
5731
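/*
 * A hedged sketch of direct use (the SAS-style path that bypasses
 * ata_probe_ent); the "foo" names are assumptions.  The caller remains
 * responsible for filling n_ports and the ports[] array afterwards.
 */
static void foo_init_bare_host(struct ata_host *host, struct device *dev,
			       const struct ata_port_operations *ops)
{
	/* sets host->lock, ->dev, ->flags and ->ops; nothing else */
	ata_host_init(host, dev, 0 /* host flags */, ops);
}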
1da177e4 5732/**
0cba632b
JG
5733 * ata_device_add - Register hardware device with ATA and SCSI layers
5734 * @ent: Probe information describing hardware device to be registered
5735 *
5736 * This function processes the information provided in the probe
5737 * information struct @ent, allocates the necessary ATA and SCSI
5738 * host information structures, initializes them, and registers
5739 * everything with requisite kernel subsystems.
5740 *
5741 * This function requests irqs, probes the ATA bus, and probes
5742 * the SCSI bus.
1da177e4
LT
5743 *
5744 * LOCKING:
0cba632b 5745 * PCI/etc. bus probe sem.
1da177e4
LT
5746 *
5747 * RETURNS:
0cba632b 5748 * Number of ports registered. Zero on error (no ports registered).
1da177e4 5749 */
057ace5e 5750int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 5751{
6d0500df 5752 unsigned int i;
1da177e4 5753 struct device *dev = ent->dev;
cca3974e 5754 struct ata_host *host;
39b07ce6 5755 int rc;
1da177e4
LT
5756
5757 DPRINTK("ENTER\n");
f20b16ff 5758
02f076aa
AC
5759 if (ent->irq == 0) {
 5760 dev_printk(KERN_ERR, dev, "is not available: no interrupt assigned\n");
5761 return 0;
5762 }
f0d36efd
TH
5763
5764 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5765 return 0;
5766
1da177e4 5767 /* alloc a container for our list of ATA ports (buses) */
f0d36efd
TH
5768 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5769 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
cca3974e 5770 if (!host)
f0d36efd
TH
5771 goto err_out;
5772 devres_add(dev, host);
5773 dev_set_drvdata(dev, host);
1da177e4 5774
cca3974e
JG
5775 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5776 host->n_ports = ent->n_ports;
5777 host->irq = ent->irq;
5778 host->irq2 = ent->irq2;
0d5ff566 5779 host->iomap = ent->iomap;
cca3974e 5780 host->private_data = ent->private_data;
1da177e4
LT
5781
5782 /* register each port bound to this device */
cca3974e 5783 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5784 struct ata_port *ap;
5785 unsigned long xfer_mode_mask;
2ec7df04 5786 int irq_line = ent->irq;
1da177e4 5787
cca3974e 5788 ap = ata_port_add(ent, host, i);
c38778c3 5789 host->ports[i] = ap;
1da177e4
LT
5790 if (!ap)
5791 goto err_out;
5792
dd5b06c4
TH
5793 /* dummy? */
5794 if (ent->dummy_port_mask & (1 << i)) {
5795 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5796 ap->ops = &ata_dummy_port_ops;
5797 continue;
5798 }
5799
5800 /* start port */
5801 rc = ap->ops->port_start(ap);
5802 if (rc) {
cca3974e
JG
5803 host->ports[i] = NULL;
5804 scsi_host_put(ap->scsi_host);
dd5b06c4
TH
5805 goto err_out;
5806 }
5807
2ec7df04
AC
5808 /* Report the secondary IRQ for second channel legacy */
5809 if (i == 1 && ent->irq2)
5810 irq_line = ent->irq2;
5811
1da177e4
LT
 5812 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5813 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5814 (ap->pio_mask << ATA_SHIFT_PIO);
5815
5816 /* print per-port info to dmesg */
0d5ff566
TH
5817 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5818 "ctl 0x%p bmdma 0x%p irq %d\n",
f15a1daf
TH
5819 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5820 ata_mode_string(xfer_mode_mask),
5821 ap->ioaddr.cmd_addr,
5822 ap->ioaddr.ctl_addr,
5823 ap->ioaddr.bmdma_addr,
2ec7df04 5824 irq_line);
1da177e4 5825
0f0a3ad3
TH
5826 /* freeze port before requesting IRQ */
5827 ata_eh_freeze_port(ap);
1da177e4
LT
5828 }
5829
2ec7df04 5830 /* obtain irq, that may be shared between channels */
f0d36efd
TH
5831 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5832 ent->irq_flags, DRV_NAME, host);
39b07ce6
JG
5833 if (rc) {
 5834 dev_printk(KERN_ERR, dev, "irq %u request failed: %d\n",
5835 ent->irq, rc);
1da177e4 5836 goto err_out;
39b07ce6 5837 }
1da177e4 5838
2ec7df04
AC
 5839 /* do we have a second IRQ for the other channel, e.g. legacy mode */
5840 if (ent->irq2) {
 5841 /* We will get weird core code crashes later if this is true,
5842 so trap it now */
5843 BUG_ON(ent->irq == ent->irq2);
5844
f0d36efd
TH
5845 rc = devm_request_irq(dev, ent->irq2,
5846 ent->port_ops->irq_handler, ent->irq_flags,
5847 DRV_NAME, host);
2ec7df04
AC
5848 if (rc) {
 5849 dev_printk(KERN_ERR, dev, "irq %u request failed: %d\n",
5850 ent->irq2, rc);
f0d36efd 5851 goto err_out;
2ec7df04
AC
5852 }
5853 }
5854
f0d36efd 5855 /* resource acquisition complete */
b878ca5d 5856 devres_remove_group(dev, ata_device_add);
f0d36efd 5857
1da177e4
LT
5858 /* perform each probe synchronously */
5859 DPRINTK("probe begin\n");
cca3974e
JG
5860 for (i = 0; i < host->n_ports; i++) {
5861 struct ata_port *ap = host->ports[i];
5a04bf4b 5862 u32 scontrol;
1da177e4
LT
5863 int rc;
5864
5a04bf4b
TH
5865 /* init sata_spd_limit to the current value */
5866 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5867 int spd = (scontrol >> 4) & 0xf;
5868 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5869 }
5870 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5871
cca3974e 5872 rc = scsi_add_host(ap->scsi_host, dev);
1da177e4 5873 if (rc) {
f15a1daf 5874 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
1da177e4
LT
5875 /* FIXME: do something useful here */
5876 /* FIXME: handle unconditional calls to
5877 * scsi_scan_host and ata_host_remove, below,
5878 * at the very least
5879 */
5880 }
3e706399 5881
52783c5d 5882 if (ap->ops->error_handler) {
1cdaf534 5883 struct ata_eh_info *ehi = &ap->eh_info;
3e706399
TH
5884 unsigned long flags;
5885
5886 ata_port_probe(ap);
5887
5888 /* kick EH for boot probing */
ba6a1308 5889 spin_lock_irqsave(ap->lock, flags);
3e706399 5890
1cdaf534
TH
5891 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5892 ehi->action |= ATA_EH_SOFTRESET;
5893 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
3e706399 5894
b51e9e5d 5895 ap->pflags |= ATA_PFLAG_LOADING;
3e706399
TH
5896 ata_port_schedule_eh(ap);
5897
ba6a1308 5898 spin_unlock_irqrestore(ap->lock, flags);
3e706399
TH
5899
5900 /* wait for EH to finish */
5901 ata_port_wait_eh(ap);
5902 } else {
44877b4e 5903 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
3e706399 5904 rc = ata_bus_probe(ap);
44877b4e 5905 DPRINTK("ata%u: bus probe end\n", ap->print_id);
3e706399
TH
5906
5907 if (rc) {
5908 /* FIXME: do something useful here?
5909 * Current libata behavior will
5910 * tear down everything when
5911 * the module is removed
5912 * or the h/w is unplugged.
5913 */
5914 }
5915 }
1da177e4
LT
5916 }
5917
5918 /* probes are done, now scan each port's disk(s) */
c893a3ae 5919 DPRINTK("host probe begin\n");
cca3974e
JG
5920 for (i = 0; i < host->n_ports; i++) {
5921 struct ata_port *ap = host->ports[i];
1da177e4 5922
644dd0cc 5923 ata_scsi_scan_host(ap);
1da177e4
LT
5924 }
5925
1da177e4
LT
5926 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5927 return ent->n_ports; /* success */
5928
f0d36efd
TH
5929 err_out:
5930 devres_release_group(dev, ata_device_add);
f0d36efd 5931 VPRINTK("EXIT, returning %d\n", rc);
1da177e4
LT
5932 return 0;
5933}
5934
720ba126
TH
5935/**
 5936 * ata_port_detach - Detach ATA port in preparation for device removal
5937 * @ap: ATA port to be detached
5938 *
5939 * Detach all ATA devices and the associated SCSI devices of @ap;
5940 * then, remove the associated SCSI host. @ap is guaranteed to
5941 * be quiescent on return from this function.
5942 *
5943 * LOCKING:
5944 * Kernel thread context (may sleep).
5945 */
5946void ata_port_detach(struct ata_port *ap)
5947{
5948 unsigned long flags;
5949 int i;
5950
5951 if (!ap->ops->error_handler)
c3cf30a9 5952 goto skip_eh;
720ba126
TH
5953
5954 /* tell EH we're leaving & flush EH */
ba6a1308 5955 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 5956 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 5957 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5958
5959 ata_port_wait_eh(ap);
5960
5961 /* EH is now guaranteed to see UNLOADING, so no new device
5962 * will be attached. Disable all existing devices.
5963 */
ba6a1308 5964 spin_lock_irqsave(ap->lock, flags);
720ba126
TH
5965
5966 for (i = 0; i < ATA_MAX_DEVICES; i++)
5967 ata_dev_disable(&ap->device[i]);
5968
ba6a1308 5969 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5970
5971 /* Final freeze & EH. All in-flight commands are aborted. EH
 5972 * will be skipped and retries will be terminated with bad
5973 * target.
5974 */
ba6a1308 5975 spin_lock_irqsave(ap->lock, flags);
720ba126 5976 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 5977 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5978
5979 ata_port_wait_eh(ap);
5980
5981 /* Flush hotplug task. The sequence is similar to
5982 * ata_port_flush_task().
5983 */
5984 flush_workqueue(ata_aux_wq);
5985 cancel_delayed_work(&ap->hotplug_task);
5986 flush_workqueue(ata_aux_wq);
5987
c3cf30a9 5988 skip_eh:
720ba126 5989 /* remove the associated SCSI host */
cca3974e 5990 scsi_remove_host(ap->scsi_host);
720ba126
TH
5991}
5992
0529c159
TH
5993/**
5994 * ata_host_detach - Detach all ports of an ATA host
5995 * @host: Host to detach
5996 *
5997 * Detach all ports of @host.
5998 *
5999 * LOCKING:
6000 * Kernel thread context (may sleep).
6001 */
6002void ata_host_detach(struct ata_host *host)
6003{
6004 int i;
6005
6006 for (i = 0; i < host->n_ports; i++)
6007 ata_port_detach(host->ports[i]);
6008}
6009
f6d950e2
BK
6010struct ata_probe_ent *
6011ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
6012{
6013 struct ata_probe_ent *probe_ent;
6014
4d05447e 6015 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
f6d950e2
BK
6016 if (!probe_ent) {
6017 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
6018 kobject_name(&(dev->kobj)));
6019 return NULL;
6020 }
6021
6022 INIT_LIST_HEAD(&probe_ent->node);
6023 probe_ent->dev = dev;
6024
6025 probe_ent->sht = port->sht;
cca3974e 6026 probe_ent->port_flags = port->flags;
f6d950e2
BK
6027 probe_ent->pio_mask = port->pio_mask;
6028 probe_ent->mwdma_mask = port->mwdma_mask;
6029 probe_ent->udma_mask = port->udma_mask;
6030 probe_ent->port_ops = port->port_ops;
d639ca94 6031 probe_ent->private_data = port->private_data;
f6d950e2
BK
6032
6033 return probe_ent;
6034}
6035
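/*
 * A hedged end-to-end sketch of how ata_probe_ent_alloc() above and
 * ata_device_add() earlier in this file cooperate in a probe path.
 * Driver specifics ("foo", the single-port layout, the PCI device) are
 * assumptions, not a fixed rule.
 */
static int foo_probe(struct pci_dev *pdev, const struct ata_port_info *pinfo)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_probe_ent_alloc(&pdev->dev, pinfo);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->n_ports = 1;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	/* taskfile/bmdma addresses for probe_ent->port[0] would be
	 * filled in here; see the ata_std_ports() sketch further below.
	 */

	/* returns the number of ports registered; zero means failure */
	if (!ata_device_add(probe_ent))
		return -ENODEV;

	devm_kfree(&pdev->dev, probe_ent);	/* devres-allocated above */
	return 0;
}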
1da177e4
LT
6036/**
6037 * ata_std_ports - initialize ioaddr with standard port offsets.
6038 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6039 *
6040 * Utility function which initializes data_addr, error_addr,
6041 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6042 * device_addr, status_addr, and command_addr to standard offsets
6043 * relative to cmd_addr.
6044 *
6045 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6046 */
0baab86b 6047
1da177e4
LT
6048void ata_std_ports(struct ata_ioports *ioaddr)
6049{
6050 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6051 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6052 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6053 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6054 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6055 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6056 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6057 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6058 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6059 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6060}
6061
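/*
 * A hedged sketch of typical use: point cmd_addr and ctl_addr at the
 * mapped taskfile registers, then let ata_std_ports() derive the rest.
 * The iomap[] layout below mimics a native-mode PCI IDE BAR arrangement
 * and is an assumption, not a fixed rule.
 */
static void foo_fill_ioaddr(struct ata_ioports *ioaddr,
			    void __iomem * const *iomap, unsigned int port_no)
{
	ioaddr->cmd_addr = iomap[2 * port_no];
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = iomap[2 * port_no + 1] + 2;	/* ctl at base + 2 */
	ioaddr->bmdma_addr = iomap[4] + 8 * port_no;	/* 8 bytes per channel */

	ata_std_ports(ioaddr);	/* fills data/error/.../command from cmd_addr */
}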
0baab86b 6062
374b1873
JG
6063#ifdef CONFIG_PCI
6064
1da177e4
LT
6065/**
6066 * ata_pci_remove_one - PCI layer callback for device removal
6067 * @pdev: PCI device that was removed
6068 *
b878ca5d
TH
6069 * PCI layer indicates to libata via this hook that hot-unplug or
6070 * module unload event has occurred. Detach all ports. Resource
6071 * release is handled via devres.
1da177e4
LT
6072 *
6073 * LOCKING:
6074 * Inherited from PCI layer (may sleep).
6075 */
f0d36efd 6076void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6077{
6078 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6079 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6080
b878ca5d 6081 ata_host_detach(host);
1da177e4
LT
6082}
6083
6084/* move to PCI subsystem */
057ace5e 6085int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6086{
6087 unsigned long tmp = 0;
6088
6089 switch (bits->width) {
6090 case 1: {
6091 u8 tmp8 = 0;
6092 pci_read_config_byte(pdev, bits->reg, &tmp8);
6093 tmp = tmp8;
6094 break;
6095 }
6096 case 2: {
6097 u16 tmp16 = 0;
6098 pci_read_config_word(pdev, bits->reg, &tmp16);
6099 tmp = tmp16;
6100 break;
6101 }
6102 case 4: {
6103 u32 tmp32 = 0;
6104 pci_read_config_dword(pdev, bits->reg, &tmp32);
6105 tmp = tmp32;
6106 break;
6107 }
6108
6109 default:
6110 return -EINVAL;
6111 }
6112
6113 tmp &= bits->mask;
6114
6115 return (tmp == bits->val) ? 1 : 0;
6116}
9b847548 6117
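/*
 * A hedged sketch of the usual pci_test_config_bits() pattern: a table
 * of enable bits, one entry per channel.  The register offsets below
 * mimic ICH-style controllers and are assumptions, not a fixed rule.
 */
static int foo_port_enabled(struct pci_dev *pdev, unsigned int port_no)
{
	static const struct pci_bits foo_enable_bits[] = {
		{ 0x41, 1, 0x80, 0x80 },	/* port 0: reg 0x41, 1 byte */
		{ 0x43, 1, 0x80, 0x80 },	/* port 1: reg 0x43, 1 byte */
	};

	/* 1 = enabled, 0 = disabled, -EINVAL = unsupported width */
	return pci_test_config_bits(pdev, &foo_enable_bits[port_no]);
}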
6ffa01d8 6118#ifdef CONFIG_PM
3c5100c1 6119void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6120{
6121 pci_save_state(pdev);
4c90d971 6122 pci_disable_device(pdev);
500530f6 6123
4c90d971 6124 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6125 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6126}
6127
553c4aa6 6128int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6129{
553c4aa6
TH
6130 int rc;
6131
9b847548
JA
6132 pci_set_power_state(pdev, PCI_D0);
6133 pci_restore_state(pdev);
553c4aa6 6134
b878ca5d 6135 rc = pcim_enable_device(pdev);
553c4aa6
TH
6136 if (rc) {
6137 dev_printk(KERN_ERR, &pdev->dev,
6138 "failed to enable device after resume (%d)\n", rc);
6139 return rc;
6140 }
6141
9b847548 6142 pci_set_master(pdev);
553c4aa6 6143 return 0;
500530f6
TH
6144}
6145
3c5100c1 6146int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6147{
cca3974e 6148 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6149 int rc = 0;
6150
cca3974e 6151 rc = ata_host_suspend(host, mesg);
500530f6
TH
6152 if (rc)
6153 return rc;
6154
3c5100c1 6155 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6156
6157 return 0;
6158}
6159
6160int ata_pci_device_resume(struct pci_dev *pdev)
6161{
cca3974e 6162 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6163 int rc;
500530f6 6164
553c4aa6
TH
6165 rc = ata_pci_device_do_resume(pdev);
6166 if (rc == 0)
6167 ata_host_resume(host);
6168 return rc;
9b847548 6169}
6ffa01d8
TH
6170#endif /* CONFIG_PM */
6171
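/*
 * A hedged sketch of how the PCI helpers above are typically wired into
 * a driver's struct pci_driver; the "foo" names, IDs and foo_init_one
 * are assumptions standing in for driver specifics.
 */
static int foo_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);

static const struct pci_device_id foo_pci_ids[] = {
	{ PCI_DEVICE(0x104c, 0x8024), },	/* hypothetical vendor/device */
	{ }
};

static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.id_table	= foo_pci_ids,
	.probe		= foo_init_one,
	.remove		= ata_pci_remove_one,
#ifdef CONFIG_PM
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
#endif
};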
1da177e4
LT
6172#endif /* CONFIG_PCI */
6173
6174
1da177e4
LT
6175static int __init ata_init(void)
6176{
a8601e5f 6177 ata_probe_timeout *= HZ;
1da177e4
LT
6178 ata_wq = create_workqueue("ata");
6179 if (!ata_wq)
6180 return -ENOMEM;
6181
453b07ac
TH
6182 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6183 if (!ata_aux_wq) {
6184 destroy_workqueue(ata_wq);
6185 return -ENOMEM;
6186 }
6187
1da177e4
LT
6188 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6189 return 0;
6190}
6191
6192static void __exit ata_exit(void)
6193{
6194 destroy_workqueue(ata_wq);
453b07ac 6195 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6196}
6197
a4625085 6198subsys_initcall(ata_init);
1da177e4
LT
6199module_exit(ata_exit);
6200
67846b30 6201static unsigned long ratelimit_time;
34af946a 6202static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6203
6204int ata_ratelimit(void)
6205{
6206 int rc;
6207 unsigned long flags;
6208
6209 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6210
6211 if (time_after(jiffies, ratelimit_time)) {
6212 rc = 1;
6213 ratelimit_time = jiffies + (HZ/5);
6214 } else
6215 rc = 0;
6216
6217 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6218
6219 return rc;
6220}
6221
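/*
 * A hedged usage sketch: throttle a hot-path warning to the window
 * implemented above (one message per HZ/5); the "foo" helper is an
 * assumption.
 */
static void foo_warn_spurious_irq(struct ata_port *ap, u8 status)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt (status 0x%x)\n", status);
}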
c22daff4
TH
6222/**
6223 * ata_wait_register - wait until register value changes
6224 * @reg: IO-mapped register
6225 * @mask: Mask to apply to read register value
6226 * @val: Wait condition
6227 * @interval_msec: polling interval in milliseconds
6228 * @timeout_msec: timeout in milliseconds
6229 *
6230 * Waiting for some bits of register to change is a common
6231 * operation for ATA controllers. This function reads 32bit LE
6232 * IO-mapped register @reg and tests for the following condition.
6233 *
6234 * (*@reg & mask) != val
6235 *
6236 * If the condition is met, it returns; otherwise, the process is
6237 * repeated after @interval_msec until timeout.
6238 *
6239 * LOCKING:
6240 * Kernel thread context (may sleep)
6241 *
6242 * RETURNS:
6243 * The final register value.
6244 */
6245u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6246 unsigned long interval_msec,
6247 unsigned long timeout_msec)
6248{
6249 unsigned long timeout;
6250 u32 tmp;
6251
6252 tmp = ioread32(reg);
6253
6254 /* Calculate timeout _after_ the first read to make sure
6255 * preceding writes reach the controller before starting to
6256 * eat away the timeout.
6257 */
6258 timeout = jiffies + (timeout_msec * HZ) / 1000;
6259
6260 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6261 msleep(interval_msec);
6262 tmp = ioread32(reg);
6263 }
6264
6265 return tmp;
6266}
6267
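/*
 * A hedged usage sketch: wait up to a second, polling every 10ms, for a
 * hypothetical BUSY bit to clear.  Note the polarity: the loop above
 * runs while (reg & mask) == val, so pass the current (busy) value to
 * wait for it to go away.  FOO_CTL_BUSY is an assumption.
 */
#define FOO_CTL_BUSY	(1U << 31)	/* hypothetical status bit */

static int foo_wait_idle(void __iomem *mmio)
{
	u32 status;

	status = ata_wait_register(mmio, FOO_CTL_BUSY, FOO_CTL_BUSY, 10, 1000);
	if (status & FOO_CTL_BUSY)
		return -EBUSY;	/* timed out, still busy */
	return 0;
}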
dd5b06c4
TH
6268/*
6269 * Dummy port_ops
6270 */
6271static void ata_dummy_noret(struct ata_port *ap) { }
6272static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6273static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6274
6275static u8 ata_dummy_check_status(struct ata_port *ap)
6276{
6277 return ATA_DRDY;
6278}
6279
6280static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6281{
6282 return AC_ERR_SYSTEM;
6283}
6284
6285const struct ata_port_operations ata_dummy_port_ops = {
6286 .port_disable = ata_port_disable,
6287 .check_status = ata_dummy_check_status,
6288 .check_altstatus = ata_dummy_check_status,
6289 .dev_select = ata_noop_dev_select,
6290 .qc_prep = ata_noop_qc_prep,
6291 .qc_issue = ata_dummy_qc_issue,
6292 .freeze = ata_dummy_noret,
6293 .thaw = ata_dummy_noret,
6294 .error_handler = ata_dummy_noret,
6295 .post_internal_cmd = ata_dummy_qc_noret,
6296 .irq_clear = ata_dummy_noret,
6297 .port_start = ata_dummy_ret0,
6298 .port_stop = ata_dummy_noret,
6299};
6300
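/*
 * A hedged usage note: a driver exposes a nonexistent or broken channel
 * by setting the matching bit in probe_ent->dummy_port_mask;
 * ata_device_add() above then points that port at ata_dummy_port_ops,
 * e.g.
 *
 *	probe_ent->dummy_port_mask |= 1 << 1;	(no secondary channel)
 */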
1da177e4
LT
6301/*
6302 * libata is essentially a library of internal helper functions for
6303 * low-level ATA host controller drivers. As such, the API/ABI is
6304 * likely to change as new drivers are added and updated.
6305 * Do not depend on ABI/API stability.
6306 */
6307
e9c83914
TH
6308EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6309EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6310EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6311EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
1da177e4
LT
6312EXPORT_SYMBOL_GPL(ata_std_bios_param);
6313EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6314EXPORT_SYMBOL_GPL(ata_host_init);
1da177e4 6315EXPORT_SYMBOL_GPL(ata_device_add);
0529c159 6316EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6317EXPORT_SYMBOL_GPL(ata_sg_init);
6318EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6319EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6320EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6321EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6322EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6323EXPORT_SYMBOL_GPL(ata_tf_load);
6324EXPORT_SYMBOL_GPL(ata_tf_read);
6325EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6326EXPORT_SYMBOL_GPL(ata_std_dev_select);
6327EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6328EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6329EXPORT_SYMBOL_GPL(ata_check_status);
6330EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6331EXPORT_SYMBOL_GPL(ata_exec_command);
6332EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6333EXPORT_SYMBOL_GPL(ata_interrupt);
0d5ff566
TH
6334EXPORT_SYMBOL_GPL(ata_data_xfer);
6335EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6336EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6337EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6338EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6339EXPORT_SYMBOL_GPL(ata_bmdma_start);
6340EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6341EXPORT_SYMBOL_GPL(ata_bmdma_status);
6342EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6343EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6344EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6345EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6346EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6347EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6348EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6349EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6350EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6351EXPORT_SYMBOL_GPL(sata_phy_debounce);
6352EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6353EXPORT_SYMBOL_GPL(sata_phy_reset);
6354EXPORT_SYMBOL_GPL(__sata_phy_reset);
6355EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6356EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6357EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6358EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6359EXPORT_SYMBOL_GPL(sata_std_hardreset);
6360EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6361EXPORT_SYMBOL_GPL(ata_dev_classify);
6362EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6363EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6364EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6365EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6366EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 6367EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6368EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6369EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6370EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6371EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6372EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6373EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6374EXPORT_SYMBOL_GPL(sata_scr_valid);
6375EXPORT_SYMBOL_GPL(sata_scr_read);
6376EXPORT_SYMBOL_GPL(sata_scr_write);
6377EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6378EXPORT_SYMBOL_GPL(ata_port_online);
6379EXPORT_SYMBOL_GPL(ata_port_offline);
6ffa01d8 6380#ifdef CONFIG_PM
cca3974e
JG
6381EXPORT_SYMBOL_GPL(ata_host_suspend);
6382EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6383#endif /* CONFIG_PM */
6a62a04d
TH
6384EXPORT_SYMBOL_GPL(ata_id_string);
6385EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 6386EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6919a0a6 6387EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6388EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6389
1bc4ccff 6390EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6391EXPORT_SYMBOL_GPL(ata_timing_compute);
6392EXPORT_SYMBOL_GPL(ata_timing_merge);
6393
1da177e4
LT
6394#ifdef CONFIG_PCI
6395EXPORT_SYMBOL_GPL(pci_test_config_bits);
6396EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6397EXPORT_SYMBOL_GPL(ata_pci_init_one);
6398EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6399#ifdef CONFIG_PM
500530f6
TH
6400EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6401EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6402EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6403EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6404#endif /* CONFIG_PM */
67951ade
AC
6405EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6406EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6407#endif /* CONFIG_PCI */
9b847548 6408
6ffa01d8 6409#ifdef CONFIG_PM
9b847548
JA
6410EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6411EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
6ffa01d8 6412#endif /* CONFIG_PM */
ece1d636 6413
ece1d636 6414EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6415EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6416EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6417EXPORT_SYMBOL_GPL(ata_port_freeze);
6418EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6419EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6420EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6421EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6422EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6423EXPORT_SYMBOL_GPL(ata_irq_on);
6424EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6425EXPORT_SYMBOL_GPL(ata_irq_ack);
6426EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 6427EXPORT_SYMBOL_GPL(ata_dev_try_classify);