/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *		   Please ALWAYS copy linux-ide@vger.kernel.org
 *		   on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION	"2.20"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

static unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int noacpi;
module_param(noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@fis: Buffer into which data will be output
 *	@pmp: Port multiplier port
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command	= fis[2];	/* status */
	tf->feature	= fis[3];	/* error */

	tf->lbal	= fis[4];
	tf->lbam	= fis[5];
	tf->lbah	= fis[6];
	tf->device	= fis[7];

	tf->hob_lbal	= fis[8];
	tf->hob_lbam	= fis[9];
	tf->hob_lbah	= fis[10];

	tf->nsect	= fis[12];
	tf->hob_nsect	= fis[13];
}

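/*
 * Illustrative sketch (not part of libata): a caller packs a taskfile
 * into an H2D Register FIS for PMP port 0, then unpacks a received D2H
 * FIS back into a taskfile.  ata_tf_init() and ATA_CMD_READ are real
 * libata symbols; the 20-byte buffer is a stand-in for a controller's
 * command-table FIS area.
 *
 *	struct ata_taskfile tf;
 *	u8 fis[20] = { 0 };
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_READ;
 *	ata_tf_to_fis(&tf, fis, 0);
 *	ata_tf_from_fis(fis, &tf);
 */
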
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 *	ata_rwcmd_protocol - set taskfile r/w commands and protocol
 *	@tf: command to examine and configure
 *	@dev: device tf belongs to
 *
 *	Examine the device configuration and tf->flags to calculate
 *	the proper read/write commands and protocol to use.
 *
 *	LOCKING:
 *	caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}

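/*
 * Worked example (illustrative): for an LBA48 FUA write on a
 * DMA-capable device, index = 16, fua = 4, lba48 = 2, write = 1, so
 * ata_rw_cmds[23] is fetched, i.e. ATA_CMD_WRITE_FUA_EXT.  A zero
 * entry means the combination is unsupported and -1 is returned.
 */
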
/**
 *	ata_tf_read_block - Read block address from ATA taskfile
 *	@tf: ATA taskfile of interest
 *	@dev: ATA device @tf belongs to
 *
 *	LOCKING:
 *	None.
 *
 *	Read block address from @tf.  This function can handle all
 *	three address formats - LBA, LBA48 and CHS.  tf->protocol and
 *	flags select the address format to use.
 *
 *	RETURNS:
 *	Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}

/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}

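/*
 * Illustrative sketch (not part of libata): building a taskfile for a
 * 16-sector FUA write at LBA 0x12345678.  In-tree, the SCSI translation
 * layer is the real caller and derives these values from the CDB.
 *
 *	struct ata_taskfile tf;
 *
 *	ata_tf_init(dev, &tf);
 *	if (ata_build_rw_tf(&tf, dev, 0x12345678ULL, 16,
 *			    ATA_TFLAG_WRITE | ATA_TFLAG_FUA, tag))
 *		goto out_err;
 */
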
/**
 *	ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 *	@pio_mask: pio_mask
 *	@mwdma_mask: mwdma_mask
 *	@udma_mask: udma_mask
 *
 *	Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 *	unsigned int xfer_mask.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 *	ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 *	@xfer_mask: xfer_mask to unpack
 *	@pio_mask: resulting pio_mask
 *	@mwdma_mask: resulting mwdma_mask
 *	@udma_mask: resulting udma_mask
 *
 *	Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 *	Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

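/*
 * Illustrative example (not from the source): packing and unpacking are
 * inverses.  With PIO0-4 (0x1f), MWDMA0-2 (0x07) and UDMA0-5 (0x3f):
 *
 *	unsigned int xfer_mask, pio;
 *
 *	xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
 *	ata_unpack_xfermask(xfer_mask, &pio, NULL, NULL);
 *
 * pio is 0x1f again, and the NULL destination masks are skipped.
 */
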
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 *	ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 *	@xfer_mask: xfer_mask of interest
 *
 *	Return matching XFER_* value for @xfer_mask.  Only the highest
 *	bit of @xfer_mask is considered.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 *	ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_mask for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 *	ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 *	@xfer_mode: XFER_* of interest
 *
 *	Return matching xfer_shift for @xfer_mode.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}

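/*
 * Worked example (illustrative): if xfer_mask has PIO0-PIO4 set, the
 * highest bit falls in the PIO row of ata_xfer_tbl, so
 * ata_xfer_mask2mode() returns XFER_PIO_0 + 4 == XFER_PIO_4;
 * ata_xfer_mode2mask(XFER_PIO_4) gives back just the PIO4 bit, and
 * ata_xfer_mode2shift(XFER_PIO_4) gives ATA_SHIFT_PIO.
 */
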
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}

/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 *	in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}

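/*
 * Worked example (illustrative): after a software reset, an ATAPI
 * device leaves 0x14 in the LBA mid and 0xeb in the LBA high shadow
 * registers, so this routine returns ATA_DEV_ATAPI; the 0x3c/0xc3 pair
 * is taken as an ATA signature.
 */
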
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 *	ata_id_string - Convert IDENTIFY DEVICE page into string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an even number.
 *
 *	The strings in the IDENTIFY DEVICE page are broken up into
 *	16-bit chunks.  Run through the string, and output each
 *	8-bit chunk linearly, regardless of platform.
 *
 *	LOCKING:
 *	caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 *	ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 *	@id: IDENTIFY DEVICE results we will examine
 *	@s: string into which data is output
 *	@ofs: offset into identify device page
 *	@len: length of string to return. must be an odd number.
 *
 *	This function is identical to ata_id_string except that it
 *	trims trailing spaces and terminates the resulting string with
 *	null.  @len must be actual maximum length (even number) + 1.
 *
 *	LOCKING:
 *	caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}

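/*
 * Illustrative sketch (not part of libata): extracting the model string
 * from an IDENTIFY page, as ata_dev_configure() below does.
 *
 *	unsigned char model[ATA_ID_PROD_LEN + 1];
 *
 *	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
 *
 * model is now NUL-terminated with trailing spaces trimmed.
 */
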
static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}

/**
 *	ata_id_to_dma_mode - Identify DMA mode from id block
 *	@dev: device to identify
 *	@unknown: mode to assume if we cannot tell
 *
 *	Set up the timing values for the device based upon the identify
 *	reported values for the DMA mode.  This function is used by drivers
 *	which rely upon firmware configured modes, but wish to report the
 *	mode correctly when possible.
 *
 *	In addition we emit similarly formatted messages to the default
 *	ata_dev_set_mode handler, in order to provide consistency of
 *	presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 *	ata_std_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.  Works with both PIO and MMIO.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
			   unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case.  Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum.  Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more.  Design your standards by
		 * committee and you too can get a free iordy field to
		 * process.  However it's the speeds, not the modes, that
		 * are supported...  Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

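/*
 * Worked example (illustrative): a drive advertising PIO3 and PIO4 in
 * word 64 has (id[ATA_ID_PIO_MODES] & 0x03) == 0x03; shifting left by 3
 * and OR-ing in 0x7 yields pio_mask 0x1f, i.e. PIO0 through PIO4.
 */
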
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	flush_workqueue(ata_wq);

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		flush_workqueue(ata_wq);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

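/*
 * Illustrative sketch (not part of this excerpt): the PIO state machine
 * is a typical port_task user, queueing its worker with a poll delay
 * and relying on EH to flush it.  The names below follow contemporary
 * libata code; treat them as an assumption here.
 *
 *	ata_port_queue_task(ap, ata_pio_task, qc, msecs_to_jiffies(10));
 *	...
 *	ata_port_flush_task(ap);	(in EH: now guaranteed not running)
 */
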
static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's the caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	if ((qc->flags & ATA_QCFLAG_FAILED) && !qc->err_mask) {
		if (ata_msg_warn(ap))
			ata_dev_printk(dev, KERN_WARNING,
				"zero err_mask for failed "
				"internal command, assuming AC_ERR_OTHER\n");
		qc->err_mask |= AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 *	ata_exec_internal - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@buf: Data buffer of the command
 *	@buflen: Length of data buffer
 *
 *	Wrapper around ata_exec_internal_sg() which takes simple
 *	buffer instead of sg list.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

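/*
 * Illustrative sketch (not part of libata): issuing IDENTIFY DEVICE via
 * the internal-command path; this mirrors what ata_dev_read_id() below
 * does.  "id" must hold ATA_ID_WORDS u16s.
 *
 *	struct ata_taskfile tf;
 *	unsigned int err_mask;
 *
 *	ata_tf_init(dev, &tf);
 *	tf.command = ATA_CMD_ID_ATA;
 *	tf.protocol = ATA_PROT_PIO;
 *	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
 *	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
 *				     id, ATA_ID_WORDS * sizeof(u16));
 */
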
/**
 *	ata_do_simple_cmd - execute simple internal command
 *	@dev: Device to which the command is sent
 *	@cmd: Opcode to execute
 *
 *	Execute a 'simple' command, that only consists of the opcode
 *	'cmd' itself, without filling any other registers
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

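/*
 * Example (illustrative): the cache-flush path reduces to a
 * register-only command through this helper, e.g.
 *
 *	u8 cmd = (dev->flags & ATA_DFLAG_FLUSH_EXT) ? ATA_CMD_FLUSH_EXT
 *						    : ATA_CMD_FLUSH;
 *	err_mask = ata_do_simple_cmd(dev, cmd);
 */
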
/**
 *	ata_pio_need_iordy - check if iordy needed
 *	@adev: ATA device
 *
 *	Check if the current speed of the device requires IORDY.  Used
 *	by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	int pio;
	int speed = adev->pio_mode - XFER_PIO_0;

	if (speed < 2)
		return 0;
	if (speed > 2)
		return 1;

	/* If we have no drive specific rule, then PIO 2 is non IORDY */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 1;
			return 0;
		}
	}
	return 0;
}

/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */

 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

static void ata_set_port_max_cmd_len(struct ata_port *ap)
{
	int i;

	if (ap->scsi_host) {
		unsigned int len = 0;

		for (i = 0; i < ATA_MAX_DEVICES; i++)
			len = max(len, ap->device[i].cdb_len);

		ap->scsi_host->max_cmd_len = len;
	}
}

/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set _SDD */
	rc = ata_acpi_push_id(ap, dev->devno);
	if (rc) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
			       rc);
	}

	/* retrieve and execute the ATA task file of _GTF */
	ata_acpi_exec_tfs(ap);

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessible.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
		ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
				sizeof(fwrevbuf));

		ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
				sizeof(modelbuf));

		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	ata_set_port_max_cmd_len(ap);

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (ap->ops->dev_config)
		ap->ops->dev_config(ap, dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}

1802/**
1803 * ata_bus_probe - Reset and probe ATA bus
1804 * @ap: Bus to probe
1805 *
0cba632b
JG
1806 * Master ATA bus probing function. Initiates a hardware-dependent
1807 * bus reset, then attempts to identify any devices found on
1808 * the bus.
1809 *
1da177e4 1810 * LOCKING:
0cba632b 1811 * PCI/etc. bus probe sem.
1da177e4
LT
1812 *
1813 * RETURNS:
96072e69 1814 * Zero on success, negative errno otherwise.
1da177e4
LT
1815 */
1816
80289167 1817int ata_bus_probe(struct ata_port *ap)
1da177e4 1818{
28ca5c57 1819 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 1820 int tries[ATA_MAX_DEVICES];
4ae72a1e 1821 int i, rc;
e82cbdb9 1822 struct ata_device *dev;
1da177e4 1823
28ca5c57 1824 ata_port_probe(ap);
c19ba8af 1825
14d2bac1
TH
1826 for (i = 0; i < ATA_MAX_DEVICES; i++)
1827 tries[i] = ATA_PROBE_MAX_TRIES;
1828
1829 retry:
2044470c 1830 /* reset and determine device classes */
52783c5d 1831 ap->ops->phy_reset(ap);
2061a47a 1832
52783c5d
TH
1833 for (i = 0; i < ATA_MAX_DEVICES; i++) {
1834 dev = &ap->device[i];
c19ba8af 1835
52783c5d
TH
1836 if (!(ap->flags & ATA_FLAG_DISABLED) &&
1837 dev->class != ATA_DEV_UNKNOWN)
1838 classes[dev->devno] = dev->class;
1839 else
1840 classes[dev->devno] = ATA_DEV_NONE;
2044470c 1841
52783c5d 1842 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 1843 }
1da177e4 1844
52783c5d 1845 ata_port_probe(ap);
2044470c 1846
b6079ca4
AC
1847 /* after the reset the device state is PIO 0 and the controller
1848 state is undefined. Record the mode */
1849
1850 for (i = 0; i < ATA_MAX_DEVICES; i++)
1851 ap->device[i].pio_mode = XFER_PIO_0;
1852
28ca5c57 1853 /* read IDENTIFY page and configure devices */
1da177e4 1854 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e82cbdb9 1855 dev = &ap->device[i];
28ca5c57 1856
ec573755
TH
1857 if (tries[i])
1858 dev->class = classes[i];
ffeae418 1859
14d2bac1 1860 if (!ata_dev_enabled(dev))
ffeae418 1861 continue;
ffeae418 1862
bff04647
TH
1863 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
1864 dev->id);
14d2bac1
TH
1865 if (rc)
1866 goto fail;
1867
efdaedc4
TH
1868 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
1869 rc = ata_dev_configure(dev);
1870 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
1871 if (rc)
1872 goto fail;
1da177e4
LT
1873 }
1874
e82cbdb9 1875 /* configure transfer mode */
3adcebb2 1876 rc = ata_set_mode(ap, &dev);
4ae72a1e 1877 if (rc)
51713d35 1878 goto fail;
1da177e4 1879
e82cbdb9
TH
1880 for (i = 0; i < ATA_MAX_DEVICES; i++)
1881 if (ata_dev_enabled(&ap->device[i]))
1882 return 0;
1da177e4 1883
e82cbdb9
TH
1884 /* no device present, disable port */
1885 ata_port_disable(ap);
1da177e4 1886 ap->ops->port_disable(ap);
96072e69 1887 return -ENODEV;
14d2bac1
TH
1888
1889 fail:
4ae72a1e
TH
1890 tries[dev->devno]--;
1891
14d2bac1
TH
1892 switch (rc) {
1893 case -EINVAL:
4ae72a1e 1894 /* eeek, something went very wrong, give up */
14d2bac1
TH
1895 tries[dev->devno] = 0;
1896 break;
4ae72a1e
TH
1897
1898 case -ENODEV:
1899 /* give it just one more chance */
1900 tries[dev->devno] = min(tries[dev->devno], 1);
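		/* fall through */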
14d2bac1 1901 case -EIO:
4ae72a1e
TH
1902 if (tries[dev->devno] == 1) {
1903 /* This is the last chance, better to slow
1904 * down than lose it.
1905 */
1906 sata_down_spd_limit(ap);
1907 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
1908 }
14d2bac1
TH
1909 }
1910
4ae72a1e 1911 if (!tries[dev->devno])
3373efd8 1912 ata_dev_disable(dev);
ec573755 1913
14d2bac1 1914 goto retry;
1da177e4
LT
1915}
1916
1917/**
0cba632b
JG
1918 * ata_port_probe - Mark port as enabled
1919 * @ap: Port for which we indicate enablement
1da177e4 1920 *
0cba632b
JG
1921 * Modify @ap data structure such that the system
1922 * thinks that the entire port is enabled.
1923 *
cca3974e 1924 * LOCKING: host lock, or some other form of
0cba632b 1925 * serialization.
1da177e4
LT
1926 */
1927
1928void ata_port_probe(struct ata_port *ap)
1929{
198e0fed 1930 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
1931}
1932
3be680b7
TH
1933/**
1934 * sata_print_link_status - Print SATA link status
1935 * @ap: SATA port to printk link status about
1936 *
1937 * This function prints link speed and status of a SATA link.
1938 *
1939 * LOCKING:
1940 * None.
1941 */
1942static void sata_print_link_status(struct ata_port *ap)
1943{
6d5f9732 1944 u32 sstatus, scontrol, tmp;
3be680b7 1945
81952c54 1946 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 1947 return;
81952c54 1948 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 1949
81952c54 1950 if (ata_port_online(ap)) {
3be680b7 1951 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
1952 ata_port_printk(ap, KERN_INFO,
1953 "SATA link up %s (SStatus %X SControl %X)\n",
1954 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 1955 } else {
f15a1daf
TH
1956 ata_port_printk(ap, KERN_INFO,
1957 "SATA link down (SStatus %X SControl %X)\n",
1958 sstatus, scontrol);
3be680b7
TH
1959 }
1960}
1961
1da177e4 1962/**
780a87f7
JG
1963 * __sata_phy_reset - Wake/reset a low-level SATA PHY
1964 * @ap: SATA port associated with target SATA PHY.
1da177e4 1965 *
780a87f7
JG
1966 * This function issues commands to standard SATA Sxxx
1967 * PHY registers, to wake up the phy (and device), and
1968 * clear any reset condition.
1da177e4
LT
1969 *
1970 * LOCKING:
0cba632b 1971 * PCI/etc. bus probe sem.
1da177e4
LT
1972 *
1973 */
1974void __sata_phy_reset(struct ata_port *ap)
1975{
1976 u32 sstatus;
1977 unsigned long timeout = jiffies + (HZ * 5);
1978
1979 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 1980 /* issue phy wake/reset */
81952c54 1981 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
1982 /* Couldn't find anything in SATA I/II specs, but
1983 * AHCI-1.1 10.4.2 says at least 1 ms. */
1984 mdelay(1);
1da177e4 1985 }
81952c54
TH
1986 /* phy wake/clear reset */
1987 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
1988
1989 /* wait for phy to become ready, if necessary */
1990 do {
1991 msleep(200);
81952c54 1992 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
1993 if ((sstatus & 0xf) != 1)
1994 break;
1995 } while (time_before(jiffies, timeout));
1996
3be680b7
TH
1997 /* print link status */
1998 sata_print_link_status(ap);
656563e3 1999
3be680b7 2000 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 2001 if (!ata_port_offline(ap))
1da177e4 2002 ata_port_probe(ap);
3be680b7 2003 else
1da177e4 2004 ata_port_disable(ap);
1da177e4 2005
198e0fed 2006 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2007 return;
2008
2009 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2010 ata_port_disable(ap);
2011 return;
2012 }
2013
2014 ap->cbl = ATA_CBL_SATA;
2015}
2016
2017/**
780a87f7
JG
2018 * sata_phy_reset - Reset SATA bus.
2019 * @ap: SATA port associated with target SATA PHY.
1da177e4 2020 *
780a87f7
JG
2021 * This function resets the SATA bus, and then probes
2022 * the bus for devices.
1da177e4
LT
2023 *
2024 * LOCKING:
0cba632b 2025 * PCI/etc. bus probe sem.
1da177e4
LT
2026 *
2027 */
2028void sata_phy_reset(struct ata_port *ap)
2029{
2030 __sata_phy_reset(ap);
198e0fed 2031 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2032 return;
2033 ata_bus_reset(ap);
2034}
2035
ebdfca6e
AC
2036/**
2037 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2038 * @adev: device
2039 *
 2040 * Obtain the other device on the same cable, or NULL if
 2041 * none is present.
2042 */
2e9edbf8 2043
3373efd8 2044struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2045{
3373efd8 2046 struct ata_port *ap = adev->ap;
ebdfca6e 2047 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2048 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2049 return NULL;
2050 return pair;
2051}
2052
1da177e4 2053/**
780a87f7
JG
2054 * ata_port_disable - Disable port.
2055 * @ap: Port to be disabled.
1da177e4 2056 *
780a87f7
JG
2057 * Modify @ap data structure such that the system
2058 * thinks that the entire port is disabled, and should
2059 * never attempt to probe or communicate with devices
2060 * on this port.
2061 *
cca3974e 2062 * LOCKING: host lock, or some other form of
780a87f7 2063 * serialization.
1da177e4
LT
2064 */
2065
2066void ata_port_disable(struct ata_port *ap)
2067{
2068 ap->device[0].class = ATA_DEV_NONE;
2069 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2070 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2071}
2072
1c3fae4d 2073/**
3c567b7d 2074 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2075 * @ap: Port to adjust SATA spd limit for
2076 *
2077 * Adjust SATA spd limit of @ap downward. Note that this
2078 * function only adjusts the limit. The change must be applied
3c567b7d 2079 * using sata_set_spd().
1c3fae4d
TH
2080 *
2081 * LOCKING:
2082 * Inherited from caller.
2083 *
2084 * RETURNS:
2085 * 0 on success, negative errno on failure
2086 */
3c567b7d 2087int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2088{
81952c54
TH
2089 u32 sstatus, spd, mask;
2090 int rc, highbit;
1c3fae4d 2091
81952c54
TH
2092 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2093 if (rc)
2094 return rc;
1c3fae4d
TH
2095
2096 mask = ap->sata_spd_limit;
2097 if (mask <= 1)
2098 return -EINVAL;
2099 highbit = fls(mask) - 1;
2100 mask &= ~(1 << highbit);
2101
81952c54 2102 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2103 if (spd <= 1)
2104 return -EINVAL;
2105 spd--;
2106 mask &= (1 << spd) - 1;
2107 if (!mask)
2108 return -EINVAL;
2109
2110 ap->sata_spd_limit = mask;
2111
f15a1daf
TH
2112 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2113 sata_spd_string(fls(mask)));
1c3fae4d
TH
2114
2115 return 0;
2116}
2117
3c567b7d 2118static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2119{
2120 u32 spd, limit;
2121
2122 if (ap->sata_spd_limit == UINT_MAX)
2123 limit = 0;
2124 else
2125 limit = fls(ap->sata_spd_limit);
2126
2127 spd = (*scontrol >> 4) & 0xf;
2128 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2129
2130 return spd != limit;
2131}
2132
2133/**
3c567b7d 2134 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2135 * @ap: Port in question
2136 *
2137 * Test whether the spd limit in SControl matches
2138 * @ap->sata_spd_limit. This function is used to determine
2139 * whether hardreset is necessary to apply SATA spd
2140 * configuration.
2141 *
2142 * LOCKING:
2143 * Inherited from caller.
2144 *
2145 * RETURNS:
2146 * 1 if SATA spd configuration is needed, 0 otherwise.
2147 */
3c567b7d 2148int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2149{
2150 u32 scontrol;
2151
81952c54 2152 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2153 return 0;
2154
3c567b7d 2155 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2156}
2157
2158/**
3c567b7d 2159 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2160 * @ap: Port to set SATA spd for
2161 *
2162 * Set SATA spd of @ap according to sata_spd_limit.
2163 *
2164 * LOCKING:
2165 * Inherited from caller.
2166 *
2167 * RETURNS:
2168 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2169 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2170 */
3c567b7d 2171int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2172{
2173 u32 scontrol;
81952c54 2174 int rc;
1c3fae4d 2175
81952c54
TH
2176 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2177 return rc;
1c3fae4d 2178
3c567b7d 2179 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2180 return 0;
2181
81952c54
TH
2182 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2183 return rc;
2184
1c3fae4d
TH
2185 return 1;
2186}
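
/*
 * Illustrative sketch (not part of libata, not wired into the driver):
 * how the two helpers above combine during error handling.  The name
 * "example_slow_down_link" is hypothetical.
 */
static int example_slow_down_link(struct ata_port *ap)
{
	int rc;

	/* drop the highest allowed speed from ap->sata_spd_limit */
	rc = sata_down_spd_limit(ap);
	if (rc)
		return rc;

	/* write the new limit to SControl.  A return of 1 means
	 * SControl changed and a hardreset is needed to renegotiate
	 * the link speed.
	 */
	rc = sata_set_spd(ap);
	return rc < 0 ? rc : 0;
}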
2187
452503f9
AC
2188/*
2189 * This mode timing computation functionality is ported over from
2190 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2191 */
2192/*
b352e57d 2193 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2194 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2195 * for UDMA6, which is currently supported only by Maxtor drives.
2196 *
2197 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2198 */
2199
2200static const struct ata_timing ata_timing[] = {
2201
2202 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2203 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2204 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2205 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2206
b352e57d
AC
2207 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2208 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2209 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2210 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2211 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2212
2213/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2214
452503f9
AC
2215 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2216 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2217 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2218
452503f9
AC
2219 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2220 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2221 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2222
b352e57d
AC
2223 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2224 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2225 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2226 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2227
2228 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2229 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2230 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2231
2232/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2233
2234 { 0xFF }
2235};
2236
2237#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2238#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2239
2240static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2241{
2242 q->setup = EZ(t->setup * 1000, T);
2243 q->act8b = EZ(t->act8b * 1000, T);
2244 q->rec8b = EZ(t->rec8b * 1000, T);
2245 q->cyc8b = EZ(t->cyc8b * 1000, T);
2246 q->active = EZ(t->active * 1000, T);
2247 q->recover = EZ(t->recover * 1000, T);
2248 q->cycle = EZ(t->cycle * 1000, T);
2249 q->udma = EZ(t->udma * 1000, UT);
2250}
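
/*
 * Worked example (illustrative): callers conventionally pass T and UT
 * as bus clock periods in picoseconds, so the " * 1000" above converts
 * the nanosecond table entries to picoseconds first.  With a 33 MHz
 * clock, T is about 30000 ps, and quantizing the 70 ns PIO0 setup time
 * gives
 *
 *	EZ(70 * 1000, 30000) = ((70000 - 1) / 30000) + 1 = 3 clocks
 *
 * i.e. timings are always rounded up to whole clock periods.
 */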
2251
2252void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2253 struct ata_timing *m, unsigned int what)
2254{
2255 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2256 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2257 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2258 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2259 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2260 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2261 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2262 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2263}
2264
2265static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2266{
2267 const struct ata_timing *t;
2268
2269 for (t = ata_timing; t->mode != speed; t++)
91190758 2270 if (t->mode == 0xFF)
452503f9 2271 return NULL;
2e9edbf8 2272 return t;
452503f9
AC
2273}
2274
2275int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2276 struct ata_timing *t, int T, int UT)
2277{
2278 const struct ata_timing *s;
2279 struct ata_timing p;
2280
2281 /*
2e9edbf8 2282 * Find the mode.
75b1f2f8 2283 */
452503f9
AC
2284
2285 if (!(s = ata_timing_find_mode(speed)))
2286 return -EINVAL;
2287
75b1f2f8
AL
2288 memcpy(t, s, sizeof(*s));
2289
452503f9
AC
2290 /*
2291 * If the drive is an EIDE drive, it can tell us it needs extended
2292 * PIO/MW_DMA cycle timing.
2293 */
2294
2295 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2296 memset(&p, 0, sizeof(p));
2297 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2298 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2299 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2300 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2301 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2302 }
2303 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2304 }
2305
2306 /*
2307 * Convert the timing to bus clock counts.
2308 */
2309
75b1f2f8 2310 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2311
2312 /*
c893a3ae
RD
2313 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
 2314 * S.M.A.R.T. and some other commands. We have to ensure that the
 2315 * DMA cycle timing is slower than or equal to the fastest PIO timing.
452503f9
AC
2316 */
2317
fd3367af 2318 if (speed > XFER_PIO_6) {
452503f9
AC
2319 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2320 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2321 }
2322
2323 /*
c893a3ae 2324 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2325 */
2326
2327 if (t->act8b + t->rec8b < t->cyc8b) {
2328 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2329 t->rec8b = t->cyc8b - t->act8b;
2330 }
2331
2332 if (t->active + t->recover < t->cycle) {
2333 t->active += (t->cycle - (t->active + t->recover)) / 2;
2334 t->recover = t->cycle - t->active;
2335 }
2336
2337 return 0;
2338}
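
/*
 * Illustrative sketch (not part of libata): roughly how a PATA LLDD's
 * ->set_piomode() callback drives the helpers above.  The function
 * name and the 30000 ps (33 MHz) clock period are assumptions; real
 * drivers use their own clock and register layout.
 */
static void example_set_piomode(struct ata_port *ap, struct ata_device *adev)
{
	struct ata_device *pair = ata_dev_pair(adev);
	struct ata_timing t, p;

	if (ata_timing_compute(adev, adev->pio_mode, &t, 30000, 30000))
		return;

	/* devices sharing a cable must run at the slower timing */
	if (pair && !ata_timing_compute(pair, pair->pio_mode, &p,
					30000, 30000))
		ata_timing_merge(&p, &t, &t, ATA_TIMING_ALL);

	/* t.setup/t.active/t.recover are now in clock counts and would
	 * be written to the controller's timing registers here.
	 */
}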
2339
cf176e1a
TH
2340/**
2341 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2342 * @dev: Device to adjust xfer masks
458337db 2343 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2344 *
2345 * Adjust xfer masks of @dev downward. Note that this function
2346 * does not apply the change. Invoking ata_set_mode() afterwards
2347 * will apply the limit.
2348 *
2349 * LOCKING:
2350 * Inherited from caller.
2351 *
2352 * RETURNS:
2353 * 0 on success, negative errno on failure
2354 */
458337db 2355int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2356{
458337db
TH
2357 char buf[32];
2358 unsigned int orig_mask, xfer_mask;
2359 unsigned int pio_mask, mwdma_mask, udma_mask;
2360 int quiet, highbit;
cf176e1a 2361
458337db
TH
2362 quiet = !!(sel & ATA_DNXFER_QUIET);
2363 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2364
458337db
TH
2365 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2366 dev->mwdma_mask,
2367 dev->udma_mask);
2368 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2369
458337db
TH
2370 switch (sel) {
2371 case ATA_DNXFER_PIO:
2372 highbit = fls(pio_mask) - 1;
2373 pio_mask &= ~(1 << highbit);
2374 break;
2375
2376 case ATA_DNXFER_DMA:
2377 if (udma_mask) {
2378 highbit = fls(udma_mask) - 1;
2379 udma_mask &= ~(1 << highbit);
2380 if (!udma_mask)
2381 return -ENOENT;
2382 } else if (mwdma_mask) {
2383 highbit = fls(mwdma_mask) - 1;
2384 mwdma_mask &= ~(1 << highbit);
2385 if (!mwdma_mask)
2386 return -ENOENT;
2387 }
2388 break;
2389
2390 case ATA_DNXFER_40C:
2391 udma_mask &= ATA_UDMA_MASK_40C;
2392 break;
2393
2394 case ATA_DNXFER_FORCE_PIO0:
2395 pio_mask &= 1;
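		/* fall through */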
2396 case ATA_DNXFER_FORCE_PIO:
2397 mwdma_mask = 0;
2398 udma_mask = 0;
2399 break;
2400
458337db
TH
2401 default:
2402 BUG();
2403 }
2404
2405 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2406
2407 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2408 return -ENOENT;
2409
2410 if (!quiet) {
2411 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2412 snprintf(buf, sizeof(buf), "%s:%s",
2413 ata_mode_string(xfer_mask),
2414 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2415 else
2416 snprintf(buf, sizeof(buf), "%s",
2417 ata_mode_string(xfer_mask));
2418
2419 ata_dev_printk(dev, KERN_WARNING,
2420 "limiting speed to %s\n", buf);
2421 }
cf176e1a
TH
2422
2423 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2424 &dev->udma_mask);
2425
cf176e1a 2426 return 0;
cf176e1a
TH
2427}
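
/*
 * Usage sketch (illustrative): error handling code typically steps a
 * misbehaving device down one notch and then reapplies the result, e.g.
 *
 *	if (!ata_down_xfermask_limit(dev, ATA_DNXFER_PIO))
 *		ata_set_mode(ap, &failed_dev);
 *
 * where "failed_dev" is a hypothetical struct ata_device pointer.
 * OR ATA_DNXFER_QUIET into @sel to suppress the warning message.
 */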
2428
3373efd8 2429static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2430{
baa1e78a 2431 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2432 unsigned int err_mask;
2433 int rc;
1da177e4 2434
e8384607 2435 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2436 if (dev->xfer_shift == ATA_SHIFT_PIO)
2437 dev->flags |= ATA_DFLAG_PIO;
2438
3373efd8 2439 err_mask = ata_dev_set_xfermode(dev);
11750a40
AC
2440 /* Old CFA may refuse this command, which is just fine */
2441 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2442 err_mask &= ~AC_ERR_DEV;
2443
83206a29 2444 if (err_mask) {
f15a1daf
TH
2445 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2446 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2447 return -EIO;
2448 }
1da177e4 2449
baa1e78a 2450 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2451 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2452 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2453 if (rc)
83206a29 2454 return rc;
48a8a14f 2455
23e71c3d
TH
2456 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2457 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2458
f15a1daf
TH
2459 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2460 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2461 return 0;
1da177e4
LT
2462}
2463
1da177e4
LT
2464/**
2465 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2466 * @ap: port on which timings will be programmed
e82cbdb9 2467 * @r_failed_dev: out parameter for failed device
1da177e4 2468 *
e82cbdb9
TH
2469 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2470 * ata_set_mode() fails, pointer to the failing device is
2471 * returned in @r_failed_dev.
780a87f7 2472 *
1da177e4 2473 * LOCKING:
0cba632b 2474 * PCI/etc. bus probe sem.
e82cbdb9
TH
2475 *
2476 * RETURNS:
2477 * 0 on success, negative errno otherwise
1da177e4 2478 */
1ad8e7f9 2479int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2480{
e8e0619f 2481 struct ata_device *dev;
e82cbdb9 2482 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2483
3adcebb2 2484 /* has private set_mode? */
b229a7b0
AC
2485 if (ap->ops->set_mode)
2486 return ap->ops->set_mode(ap, r_failed_dev);
3adcebb2 2487
a6d5a51c
TH
2488 /* step 1: calculate xfer_mask */
2489 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2490 unsigned int pio_mask, dma_mask;
a6d5a51c 2491
e8e0619f
TH
2492 dev = &ap->device[i];
2493
e1211e3f 2494 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2495 continue;
2496
3373efd8 2497 ata_dev_xfermask(dev);
1da177e4 2498
acf356b1
TH
2499 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2500 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2501 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2502 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2503
4f65977d 2504 found = 1;
5444a6f4
AC
2505 if (dev->dma_mode)
2506 used_dma = 1;
a6d5a51c 2507 }
4f65977d 2508 if (!found)
e82cbdb9 2509 goto out;
a6d5a51c
TH
2510
2511 /* step 2: always set host PIO timings */
e8e0619f
TH
2512 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2513 dev = &ap->device[i];
2514 if (!ata_dev_enabled(dev))
2515 continue;
2516
2517 if (!dev->pio_mode) {
f15a1daf 2518 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2519 rc = -EINVAL;
e82cbdb9 2520 goto out;
e8e0619f
TH
2521 }
2522
2523 dev->xfer_mode = dev->pio_mode;
2524 dev->xfer_shift = ATA_SHIFT_PIO;
2525 if (ap->ops->set_piomode)
2526 ap->ops->set_piomode(ap, dev);
2527 }
1da177e4 2528
a6d5a51c 2529 /* step 3: set host DMA timings */
e8e0619f
TH
2530 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2531 dev = &ap->device[i];
2532
2533 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2534 continue;
2535
2536 dev->xfer_mode = dev->dma_mode;
2537 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2538 if (ap->ops->set_dmamode)
2539 ap->ops->set_dmamode(ap, dev);
2540 }
1da177e4
LT
2541
2542 /* step 4: update devices' xfer mode */
83206a29 2543 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2544 dev = &ap->device[i];
1da177e4 2545
18d90deb 2546 /* don't update suspended devices' xfer mode */
02670bf3 2547 if (!ata_dev_ready(dev))
83206a29
TH
2548 continue;
2549
3373efd8 2550 rc = ata_dev_set_mode(dev);
5bbc53f4 2551 if (rc)
e82cbdb9 2552 goto out;
83206a29 2553 }
1da177e4 2554
e8e0619f
TH
2555 /* Record simplex status. If we selected DMA then the other
2556 * host channels are not permitted to do so.
5444a6f4 2557 */
cca3974e
JG
2558 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
2559 ap->host->simplex_claimed = 1;
5444a6f4 2560
e8e0619f 2561 	/* step 5: chip-specific finalisation */
1da177e4
LT
2562 if (ap->ops->post_set_mode)
2563 ap->ops->post_set_mode(ap);
2564
e82cbdb9
TH
2565 out:
2566 if (rc)
2567 *r_failed_dev = dev;
2568 return rc;
1da177e4
LT
2569}
2570
1fdffbce
JG
2571/**
2572 * ata_tf_to_host - issue ATA taskfile to host controller
2573 * @ap: port to which command is being issued
2574 * @tf: ATA taskfile register set
2575 *
2576 * Issues ATA taskfile register set to ATA host controller,
2577 * with proper synchronization with interrupt handler and
2578 * other threads.
2579 *
2580 * LOCKING:
cca3974e 2581 * spin_lock_irqsave(host lock)
1fdffbce
JG
2582 */
2583
2584static inline void ata_tf_to_host(struct ata_port *ap,
2585 const struct ata_taskfile *tf)
2586{
2587 ap->ops->tf_load(ap, tf);
2588 ap->ops->exec_command(ap, tf);
2589}
2590
1da177e4
LT
2591/**
2592 * ata_busy_sleep - sleep until BSY clears, or timeout
2593 * @ap: port containing status register to be polled
2594 * @tmout_pat: impatience timeout
2595 * @tmout: overall timeout
2596 *
780a87f7
JG
2597 * Sleep until ATA Status register bit BSY clears,
2598 * or a timeout occurs.
2599 *
d1adc1bb
TH
2600 * LOCKING:
2601 * Kernel thread context (may sleep).
2602 *
2603 * RETURNS:
2604 * 0 on success, -errno otherwise.
1da177e4 2605 */
d1adc1bb
TH
2606int ata_busy_sleep(struct ata_port *ap,
2607 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2608{
2609 unsigned long timer_start, timeout;
2610 u8 status;
2611
2612 status = ata_busy_wait(ap, ATA_BUSY, 300);
2613 timer_start = jiffies;
2614 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2615 while (status != 0xff && (status & ATA_BUSY) &&
2616 time_before(jiffies, timeout)) {
1da177e4
LT
2617 msleep(50);
2618 status = ata_busy_wait(ap, ATA_BUSY, 3);
2619 }
2620
d1adc1bb 2621 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2622 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2623 "port is slow to respond, please be patient "
2624 "(Status 0x%x)\n", status);
1da177e4
LT
2625
2626 timeout = timer_start + tmout;
d1adc1bb
TH
2627 while (status != 0xff && (status & ATA_BUSY) &&
2628 time_before(jiffies, timeout)) {
1da177e4
LT
2629 msleep(50);
2630 status = ata_chk_status(ap);
2631 }
2632
d1adc1bb
TH
2633 if (status == 0xff)
2634 return -ENODEV;
2635
1da177e4 2636 if (status & ATA_BUSY) {
f15a1daf 2637 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
2638 "(%lu secs, Status 0x%x)\n",
2639 tmout / HZ, status);
d1adc1bb 2640 return -EBUSY;
1da177e4
LT
2641 }
2642
2643 return 0;
2644}
2645
2646static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
2647{
2648 struct ata_ioports *ioaddr = &ap->ioaddr;
2649 unsigned int dev0 = devmask & (1 << 0);
2650 unsigned int dev1 = devmask & (1 << 1);
2651 unsigned long timeout;
2652
2653 /* if device 0 was found in ata_devchk, wait for its
2654 * BSY bit to clear
2655 */
2656 if (dev0)
2657 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2658
2659 /* if device 1 was found in ata_devchk, wait for
2660 * register access, then wait for BSY to clear
2661 */
2662 timeout = jiffies + ATA_TMOUT_BOOT;
2663 while (dev1) {
2664 u8 nsect, lbal;
2665
2666 ap->ops->dev_select(ap, 1);
0d5ff566
TH
2667 nsect = ioread8(ioaddr->nsect_addr);
2668 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
2669 if ((nsect == 1) && (lbal == 1))
2670 break;
2671 if (time_after(jiffies, timeout)) {
2672 dev1 = 0;
2673 break;
2674 }
2675 msleep(50); /* give drive a breather */
2676 }
2677 if (dev1)
2678 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2679
2680 /* is all this really necessary? */
2681 ap->ops->dev_select(ap, 0);
2682 if (dev1)
2683 ap->ops->dev_select(ap, 1);
2684 if (dev0)
2685 ap->ops->dev_select(ap, 0);
2686}
2687
1da177e4
LT
2688static unsigned int ata_bus_softreset(struct ata_port *ap,
2689 unsigned int devmask)
2690{
2691 struct ata_ioports *ioaddr = &ap->ioaddr;
2692
44877b4e 2693 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
2694
2695 /* software reset. causes dev0 to be selected */
0d5ff566
TH
2696 iowrite8(ap->ctl, ioaddr->ctl_addr);
2697 udelay(20); /* FIXME: flush */
2698 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
2699 udelay(20); /* FIXME: flush */
2700 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2701
2702 /* spec mandates ">= 2ms" before checking status.
2703 * We wait 150ms, because that was the magic delay used for
2704 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
2705 * between when the ATA command register is written, and then
2706 * status is checked. Because waiting for "a while" before
2707 * checking status is fine, post SRST, we perform this magic
2708 * delay here as well.
09c7ad79
AC
2709 *
 2710 * Old drivers/ide uses the 2 ms rule and then waits for ready
1da177e4
LT
2711 */
2712 msleep(150);
2713
2e9edbf8 2714 /* Before we perform post reset processing we want to see if
298a41ca
TH
2715 * the bus shows 0xFF because the odd clown forgets the D7
2716 * pulldown resistor.
2717 */
d1adc1bb
TH
2718 if (ata_check_status(ap) == 0xFF)
2719 return 0;
09c7ad79 2720
1da177e4
LT
2721 ata_bus_post_reset(ap, devmask);
2722
2723 return 0;
2724}
2725
2726/**
2727 * ata_bus_reset - reset host port and associated ATA channel
2728 * @ap: port to reset
2729 *
2730 * This is typically the first time we actually start issuing
2731 * commands to the ATA channel. We wait for BSY to clear, then
2732 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
2733 * result. Determine what devices, if any, are on the channel
2734 * by looking at the device 0/1 error register. Look at the signature
2735 * stored in each device's taskfile registers, to determine if
2736 * the device is ATA or ATAPI.
2737 *
2738 * LOCKING:
0cba632b 2739 * PCI/etc. bus probe sem.
cca3974e 2740 * Obtains host lock.
1da177e4
LT
2741 *
2742 * SIDE EFFECTS:
198e0fed 2743 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
2744 */
2745
2746void ata_bus_reset(struct ata_port *ap)
2747{
2748 struct ata_ioports *ioaddr = &ap->ioaddr;
2749 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
2750 u8 err;
aec5c3c1 2751 unsigned int dev0, dev1 = 0, devmask = 0;
1da177e4 2752
44877b4e 2753 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
2754
2755 /* determine if device 0/1 are present */
2756 if (ap->flags & ATA_FLAG_SATA_RESET)
2757 dev0 = 1;
2758 else {
2759 dev0 = ata_devchk(ap, 0);
2760 if (slave_possible)
2761 dev1 = ata_devchk(ap, 1);
2762 }
2763
2764 if (dev0)
2765 devmask |= (1 << 0);
2766 if (dev1)
2767 devmask |= (1 << 1);
2768
2769 /* select device 0 again */
2770 ap->ops->dev_select(ap, 0);
2771
2772 /* issue bus reset */
2773 if (ap->flags & ATA_FLAG_SRST)
aec5c3c1
TH
2774 if (ata_bus_softreset(ap, devmask))
2775 goto err_out;
1da177e4
LT
2776
2777 /*
2778 * determine by signature whether we have ATA or ATAPI devices
2779 */
b4dc7623 2780 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 2781 if ((slave_possible) && (err != 0x81))
b4dc7623 2782 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
2783
2784 /* re-enable interrupts */
83625006 2785 ap->ops->irq_on(ap);
1da177e4
LT
2786
2787 /* is double-select really necessary? */
2788 if (ap->device[1].class != ATA_DEV_NONE)
2789 ap->ops->dev_select(ap, 1);
2790 if (ap->device[0].class != ATA_DEV_NONE)
2791 ap->ops->dev_select(ap, 0);
2792
2793 /* if no devices were detected, disable this port */
2794 if ((ap->device[0].class == ATA_DEV_NONE) &&
2795 (ap->device[1].class == ATA_DEV_NONE))
2796 goto err_out;
2797
2798 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
2799 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 2800 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
2801 }
2802
2803 DPRINTK("EXIT\n");
2804 return;
2805
2806err_out:
f15a1daf 2807 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
2808 ap->ops->port_disable(ap);
2809
2810 DPRINTK("EXIT\n");
2811}
2812
d7bb4cc7
TH
2813/**
2814 * sata_phy_debounce - debounce SATA phy status
2815 * @ap: ATA port to debounce SATA phy status for
 2816 * @params: timing parameters { interval, duration, timeout } in msec
2817 *
2818 * Make sure SStatus of @ap reaches stable state, determined by
2819 * holding the same value where DET is not 1 for @duration polled
 2820 * every @interval, before @timeout. @timeout constrains the
 2821 * beginning of the stable state. Because DET gets stuck at 1 on
 2822 * some controllers after hot unplugging, this function waits
 2823 * until timeout and then returns 0 if DET is stable at 1.
2824 *
2825 * LOCKING:
2826 * Kernel thread context (may sleep)
2827 *
2828 * RETURNS:
2829 * 0 on success, -errno on failure.
2830 */
2831int sata_phy_debounce(struct ata_port *ap, const unsigned long *params)
7a7921e8 2832{
d7bb4cc7
TH
2833 unsigned long interval_msec = params[0];
2834 unsigned long duration = params[1] * HZ / 1000;
2835 unsigned long timeout = jiffies + params[2] * HZ / 1000;
2836 unsigned long last_jiffies;
2837 u32 last, cur;
2838 int rc;
2839
2840 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2841 return rc;
2842 cur &= 0xf;
2843
2844 last = cur;
2845 last_jiffies = jiffies;
2846
2847 while (1) {
2848 msleep(interval_msec);
2849 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
2850 return rc;
2851 cur &= 0xf;
2852
2853 /* DET stable? */
2854 if (cur == last) {
2855 if (cur == 1 && time_before(jiffies, timeout))
2856 continue;
2857 if (time_after(jiffies, last_jiffies + duration))
2858 return 0;
2859 continue;
2860 }
2861
2862 /* unstable, start over */
2863 last = cur;
2864 last_jiffies = jiffies;
2865
2866 /* check timeout */
2867 if (time_after(jiffies, timeout))
2868 return -EBUSY;
2869 }
2870}
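
/*
 * Usage sketch (illustrative): callers pass one of the debounce
 * parameter tables declared near the top of this file, e.g.
 *
 *	rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
 *
 * which polls SStatus.DET at the table's interval until it has held a
 * stable non-1 value for the required duration or the timeout expires.
 */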
2871
2872/**
2873 * sata_phy_resume - resume SATA phy
2874 * @ap: ATA port to resume SATA phy for
 2875 * @params: timing parameters { interval, duration, timeout } in msec
2876 *
2877 * Resume SATA phy of @ap and debounce it.
2878 *
2879 * LOCKING:
2880 * Kernel thread context (may sleep)
2881 *
2882 * RETURNS:
2883 * 0 on success, -errno on failure.
2884 */
2885int sata_phy_resume(struct ata_port *ap, const unsigned long *params)
2886{
2887 u32 scontrol;
81952c54
TH
2888 int rc;
2889
2890 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2891 return rc;
7a7921e8 2892
852ee16a 2893 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
2894
2895 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2896 return rc;
7a7921e8 2897
d7bb4cc7
TH
2898 /* Some PHYs react badly if SStatus is pounded immediately
2899 * after resuming. Delay 200ms before debouncing.
2900 */
2901 msleep(200);
7a7921e8 2902
d7bb4cc7 2903 return sata_phy_debounce(ap, params);
7a7921e8
TH
2904}
2905
f5914a46
TH
2906static void ata_wait_spinup(struct ata_port *ap)
2907{
2908 struct ata_eh_context *ehc = &ap->eh_context;
2909 unsigned long end, secs;
2910 int rc;
2911
2912 /* first, debounce phy if SATA */
2913 if (ap->cbl == ATA_CBL_SATA) {
e9c83914 2914 rc = sata_phy_debounce(ap, sata_deb_timing_hotplug);
f5914a46
TH
2915
2916 /* if debounced successfully and offline, no need to wait */
2917 if ((rc == 0 || rc == -EOPNOTSUPP) && ata_port_offline(ap))
2918 return;
2919 }
2920
2921 /* okay, let's give the drive time to spin up */
2922 end = ehc->i.hotplug_timestamp + ATA_SPINUP_WAIT * HZ / 1000;
2923 secs = ((end - jiffies) + HZ - 1) / HZ;
2924
2925 if (time_after(jiffies, end))
2926 return;
2927
2928 if (secs > 5)
2929 ata_port_printk(ap, KERN_INFO, "waiting for device to spin up "
2930 "(%lu secs)\n", secs);
2931
2932 schedule_timeout_uninterruptible(end - jiffies);
2933}
2934
2935/**
2936 * ata_std_prereset - prepare for reset
2937 * @ap: ATA port to be reset
2938 *
2939 * @ap is about to be reset. Initialize it.
2940 *
2941 * LOCKING:
2942 * Kernel thread context (may sleep)
2943 *
2944 * RETURNS:
2945 * 0 on success, -errno otherwise.
2946 */
2947int ata_std_prereset(struct ata_port *ap)
2948{
2949 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 2950 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
2951 int rc;
2952
28324304
TH
2953 /* handle link resume & hotplug spinup */
2954 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
2955 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
2956 ehc->i.action |= ATA_EH_HARDRESET;
2957
2958 if ((ehc->i.flags & ATA_EHI_HOTPLUGGED) &&
2959 (ap->flags & ATA_FLAG_SKIP_D2H_BSY))
2960 ata_wait_spinup(ap);
f5914a46
TH
2961
2962 /* if we're about to do hardreset, nothing more to do */
2963 if (ehc->i.action & ATA_EH_HARDRESET)
2964 return 0;
2965
2966 /* if SATA, resume phy */
2967 if (ap->cbl == ATA_CBL_SATA) {
f5914a46
TH
2968 rc = sata_phy_resume(ap, timing);
2969 if (rc && rc != -EOPNOTSUPP) {
2970 /* phy resume failed */
2971 ata_port_printk(ap, KERN_WARNING, "failed to resume "
2972 "link for reset (errno=%d)\n", rc);
2973 return rc;
2974 }
2975 }
2976
2977 /* Wait for !BSY if the controller can wait for the first D2H
2978 * Reg FIS and we don't know that no device is attached.
2979 */
2980 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap))
2981 ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
2982
2983 return 0;
2984}
2985
c2bd5804
TH
2986/**
2987 * ata_std_softreset - reset host port via ATA SRST
2988 * @ap: port to reset
c2bd5804
TH
2989 * @classes: resulting classes of attached devices
2990 *
52783c5d 2991 * Reset host port using ATA SRST.
c2bd5804
TH
2992 *
2993 * LOCKING:
2994 * Kernel thread context (may sleep)
2995 *
2996 * RETURNS:
2997 * 0 on success, -errno otherwise.
2998 */
2bf2cb26 2999int ata_std_softreset(struct ata_port *ap, unsigned int *classes)
c2bd5804
TH
3000{
3001 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3002 unsigned int devmask = 0, err_mask;
3003 u8 err;
3004
3005 DPRINTK("ENTER\n");
3006
81952c54 3007 if (ata_port_offline(ap)) {
3a39746a
TH
3008 classes[0] = ATA_DEV_NONE;
3009 goto out;
3010 }
3011
c2bd5804
TH
3012 /* determine if device 0/1 are present */
3013 if (ata_devchk(ap, 0))
3014 devmask |= (1 << 0);
3015 if (slave_possible && ata_devchk(ap, 1))
3016 devmask |= (1 << 1);
3017
c2bd5804
TH
3018 /* select device 0 again */
3019 ap->ops->dev_select(ap, 0);
3020
3021 /* issue bus reset */
3022 DPRINTK("about to softreset, devmask=%x\n", devmask);
3023 err_mask = ata_bus_softreset(ap, devmask);
3024 if (err_mask) {
f15a1daf
TH
3025 ata_port_printk(ap, KERN_ERR, "SRST failed (err_mask=0x%x)\n",
3026 err_mask);
c2bd5804
TH
3027 return -EIO;
3028 }
3029
3030 /* determine by signature whether we have ATA or ATAPI devices */
3031 classes[0] = ata_dev_try_classify(ap, 0, &err);
3032 if (slave_possible && err != 0x81)
3033 classes[1] = ata_dev_try_classify(ap, 1, &err);
3034
3a39746a 3035 out:
c2bd5804
TH
3036 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3037 return 0;
3038}
3039
3040/**
b6103f6d 3041 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3042 * @ap: port to reset
b6103f6d 3043 * @timing: timing parameters { interval, duration, timeout } in msec
c2bd5804
TH
3044 *
3045 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3046 *
3047 * LOCKING:
3048 * Kernel thread context (may sleep)
3049 *
3050 * RETURNS:
3051 * 0 on success, -errno otherwise.
3052 */
b6103f6d 3053int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing)
c2bd5804 3054{
852ee16a 3055 u32 scontrol;
81952c54 3056 int rc;
852ee16a 3057
c2bd5804
TH
3058 DPRINTK("ENTER\n");
3059
3c567b7d 3060 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3061 /* SATA spec says nothing about how to reconfigure
3062 * spd. To be on the safe side, turn off phy during
3063 * reconfiguration. This works for at least ICH7 AHCI
3064 * and Sil3124.
3065 */
81952c54 3066 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3067 goto out;
81952c54 3068
a34b6fc0 3069 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3070
3071 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3072 goto out;
1c3fae4d 3073
3c567b7d 3074 sata_set_spd(ap);
1c3fae4d
TH
3075 }
3076
3077 /* issue phy wake/reset */
81952c54 3078 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3079 goto out;
81952c54 3080
852ee16a 3081 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3082
3083 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3084 goto out;
c2bd5804 3085
1c3fae4d 3086 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3087 * 10.4.2 says at least 1 ms.
3088 */
3089 msleep(1);
3090
1c3fae4d 3091 /* bring phy back */
b6103f6d
TH
3092 rc = sata_phy_resume(ap, timing);
3093 out:
3094 DPRINTK("EXIT, rc=%d\n", rc);
3095 return rc;
3096}
3097
3098/**
3099 * sata_std_hardreset - reset host port via SATA phy reset
3100 * @ap: port to reset
3101 * @class: resulting class of attached device
3102 *
3103 * SATA phy-reset host port using DET bits of SControl register,
3104 * wait for !BSY and classify the attached device.
3105 *
3106 * LOCKING:
3107 * Kernel thread context (may sleep)
3108 *
3109 * RETURNS:
3110 * 0 on success, -errno otherwise.
3111 */
3112int sata_std_hardreset(struct ata_port *ap, unsigned int *class)
3113{
3114 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3115 int rc;
3116
3117 DPRINTK("ENTER\n");
3118
3119 /* do hardreset */
3120 rc = sata_port_hardreset(ap, timing);
3121 if (rc) {
3122 ata_port_printk(ap, KERN_ERR,
3123 "COMRESET failed (errno=%d)\n", rc);
3124 return rc;
3125 }
c2bd5804 3126
c2bd5804 3127 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3128 if (ata_port_offline(ap)) {
c2bd5804
TH
3129 *class = ATA_DEV_NONE;
3130 DPRINTK("EXIT, link offline\n");
3131 return 0;
3132 }
3133
34fee227
TH
3134 /* wait a while before checking status, see SRST for more info */
3135 msleep(150);
3136
c2bd5804 3137 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
f15a1daf
TH
3138 ata_port_printk(ap, KERN_ERR,
3139 "COMRESET failed (device not ready)\n");
c2bd5804
TH
3140 return -EIO;
3141 }
3142
3a39746a
TH
3143 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3144
c2bd5804
TH
3145 *class = ata_dev_try_classify(ap, 0, NULL);
3146
3147 DPRINTK("EXIT, class=%u\n", *class);
3148 return 0;
3149}
3150
3151/**
3152 * ata_std_postreset - standard postreset callback
3153 * @ap: the target ata_port
3154 * @classes: classes of attached devices
3155 *
3156 * This function is invoked after a successful reset. Note that
3157 * the device might have been reset more than once using
3158 * different reset methods before postreset is invoked.
c2bd5804 3159 *
c2bd5804
TH
3160 * LOCKING:
3161 * Kernel thread context (may sleep)
3162 */
3163void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3164{
dc2b3515
TH
3165 u32 serror;
3166
c2bd5804
TH
3167 DPRINTK("ENTER\n");
3168
c2bd5804 3169 /* print link status */
81952c54 3170 sata_print_link_status(ap);
c2bd5804 3171
dc2b3515
TH
3172 /* clear SError */
3173 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3174 sata_scr_write(ap, SCR_ERROR, serror);
3175
3a39746a 3176 /* re-enable interrupts */
83625006
AI
3177 if (!ap->ops->error_handler)
3178 ap->ops->irq_on(ap);
c2bd5804
TH
3179
3180 /* is double-select really necessary? */
3181 if (classes[0] != ATA_DEV_NONE)
3182 ap->ops->dev_select(ap, 1);
3183 if (classes[1] != ATA_DEV_NONE)
3184 ap->ops->dev_select(ap, 0);
3185
3a39746a
TH
3186 /* bail out if no device is present */
3187 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3188 DPRINTK("EXIT, no device\n");
3189 return;
3190 }
3191
3192 /* set up device control */
0d5ff566
TH
3193 if (ap->ioaddr.ctl_addr)
3194 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3195
3196 DPRINTK("EXIT\n");
3197}
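
/*
 * Illustrative sketch (assumption: a BMDMA-style LLDD): the standard
 * reset callbacks above are usually chained through the helper in
 * libata-bmdma.c rather than called directly, along the lines of
 *
 *	static void example_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset,
 *				   sata_std_hardreset, ata_std_postreset);
 *	}
 *
 * where "example_error_handler" is a hypothetical ->error_handler.
 */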
3198
623a3128
TH
3199/**
3200 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3201 * @dev: device to compare against
3202 * @new_class: class of the new device
3203 * @new_id: IDENTIFY page of the new device
3204 *
3205 * Compare @new_class and @new_id against @dev and determine
3206 * whether @dev is the device indicated by @new_class and
3207 * @new_id.
3208 *
3209 * LOCKING:
3210 * None.
3211 *
3212 * RETURNS:
3213 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3214 */
3373efd8
TH
3215static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3216 const u16 *new_id)
623a3128
TH
3217{
3218 const u16 *old_id = dev->id;
a0cf733b
TH
3219 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3220 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3221 u64 new_n_sectors;
3222
3223 if (dev->class != new_class) {
f15a1daf
TH
3224 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3225 dev->class, new_class);
623a3128
TH
3226 return 0;
3227 }
3228
a0cf733b
TH
3229 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3230 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3231 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3232 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3233 new_n_sectors = ata_id_n_sectors(new_id);
3234
3235 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3236 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3237 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3238 return 0;
3239 }
3240
3241 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3242 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3243 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3244 return 0;
3245 }
3246
3247 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3248 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3249 "%llu != %llu\n",
3250 (unsigned long long)dev->n_sectors,
3251 (unsigned long long)new_n_sectors);
623a3128
TH
3252 return 0;
3253 }
3254
3255 return 1;
3256}
3257
3258/**
3259 * ata_dev_revalidate - Revalidate ATA device
623a3128 3260 * @dev: device to revalidate
bff04647 3261 * @readid_flags: read ID flags
623a3128
TH
3262 *
3263 * Re-read IDENTIFY page and make sure @dev is still attached to
3264 * the port.
3265 *
3266 * LOCKING:
3267 * Kernel thread context (may sleep)
3268 *
3269 * RETURNS:
3270 * 0 on success, negative errno otherwise
3271 */
bff04647 3272int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3273{
5eb45c02 3274 unsigned int class = dev->class;
f15a1daf 3275 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3276 int rc;
3277
5eb45c02
TH
3278 if (!ata_dev_enabled(dev)) {
3279 rc = -ENODEV;
3280 goto fail;
3281 }
623a3128 3282
fe635c7e 3283 /* read ID data */
bff04647 3284 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3285 if (rc)
3286 goto fail;
3287
3288 /* is the device still there? */
3373efd8 3289 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3290 rc = -ENODEV;
3291 goto fail;
3292 }
3293
fe635c7e 3294 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3295
3296 /* configure device according to the new ID */
efdaedc4 3297 rc = ata_dev_configure(dev);
5eb45c02
TH
3298 if (rc == 0)
3299 return 0;
623a3128
TH
3300
3301 fail:
f15a1daf 3302 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3303 return rc;
3304}
3305
6919a0a6
AC
3306struct ata_blacklist_entry {
3307 const char *model_num;
3308 const char *model_rev;
3309 unsigned long horkage;
3310};
3311
3312static const struct ata_blacklist_entry ata_device_blacklist [] = {
3313 /* Devices with DMA related problems under Linux */
3314 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3315 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3316 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3317 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3318 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3319 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3320 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3321 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3322 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3323 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3324 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3325 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3326 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3327 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3328 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3329 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3330 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3331 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3332 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3333 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3334 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3335 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3336 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3337 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3338 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3339 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3340 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3341 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3342 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3343
3344 /* Devices we expect to fail diagnostics */
3345
3346 /* Devices where NCQ should be avoided */
3347 /* NCQ is slow */
3348 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
3349
3350 /* Devices with NCQ limits */
3351
3352 /* End Marker */
3353 { }
1da177e4 3354};
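
/*
 * Adding an entry (illustrative): a hypothetical drive whose NCQ is
 * broken only in one firmware revision would be listed above as
 *
 *	{ "ExampleDisk 123",	"FW1.0",	ATA_HORKAGE_NONCQ },
 *
 * a NULL model_rev matches every firmware revision of that model.
 */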
2e9edbf8 3355
6919a0a6 3356unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3357{
8bfa79fc
TH
3358 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3359 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3360 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3361
8bfa79fc
TH
3362 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3363 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3364
6919a0a6 3365 while (ad->model_num) {
8bfa79fc 3366 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3367 if (ad->model_rev == NULL)
3368 return ad->horkage;
8bfa79fc 3369 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3370 return ad->horkage;
f4b15fef 3371 }
6919a0a6 3372 ad++;
f4b15fef 3373 }
1da177e4
LT
3374 return 0;
3375}
3376
6919a0a6
AC
3377static int ata_dma_blacklisted(const struct ata_device *dev)
3378{
3379 /* We don't support polling DMA.
3380 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3381 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3382 */
3383 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3384 (dev->flags & ATA_DFLAG_CDB_INTR))
3385 return 1;
3386 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3387}
3388
a6d5a51c
TH
3389/**
3390 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3391 * @dev: Device to compute xfermask for
3392 *
acf356b1
TH
3393 * Compute supported xfermask of @dev and store it in
3394 * dev->*_mask. This function is responsible for applying all
3395 * known limits including host controller limits, device
3396 * blacklist, etc...
a6d5a51c
TH
3397 *
3398 * LOCKING:
3399 * None.
a6d5a51c 3400 */
3373efd8 3401static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3402{
3373efd8 3403 struct ata_port *ap = dev->ap;
cca3974e 3404 struct ata_host *host = ap->host;
a6d5a51c 3405 unsigned long xfer_mask;
1da177e4 3406
37deecb5 3407 /* controller modes available */
565083e1
TH
3408 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3409 ap->mwdma_mask, ap->udma_mask);
3410
3411 /* Apply cable rule here. Don't apply it early because when
3412 * we handle hot plug the cable type can itself change.
3413 */
3414 if (ap->cbl == ATA_CBL_PATA40)
3415 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
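		/* 0xF8 << ATA_SHIFT_UDMA clears UDMA3 and above,
		 * capping a 40-wire cable at UDMA/33 */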
fc085150
AC
3416 /* Apply drive side cable rule. Unknown or 80 pin cables reported
3417 * host side are checked drive side as well. Cases where we know a
3418 * 40wire cable is used safely for 80 are not checked here.
3419 */
3420 if (ata_drive_40wire(dev->id) && (ap->cbl == ATA_CBL_PATA_UNK || ap->cbl == ATA_CBL_PATA80))
3421 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3422
1da177e4 3423
37deecb5
TH
3424 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3425 dev->mwdma_mask, dev->udma_mask);
3426 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3427
b352e57d
AC
3428 /*
3429 * CFA Advanced TrueIDE timings are not allowed on a shared
3430 * cable
3431 */
3432 if (ata_dev_pair(dev)) {
3433 /* No PIO5 or PIO6 */
3434 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3435 /* No MWDMA3 or MWDMA 4 */
3436 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3437 }
3438
37deecb5
TH
3439 if (ata_dma_blacklisted(dev)) {
3440 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3441 ata_dev_printk(dev, KERN_WARNING,
3442 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3443 }
a6d5a51c 3444
cca3974e 3445 if ((host->flags & ATA_HOST_SIMPLEX) && host->simplex_claimed) {
37deecb5
TH
3446 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3447 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3448 "other device, disabling DMA\n");
5444a6f4 3449 }
565083e1 3450
5444a6f4
AC
3451 if (ap->ops->mode_filter)
3452 xfer_mask = ap->ops->mode_filter(ap, dev, xfer_mask);
3453
565083e1
TH
3454 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3455 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3456}
3457
1da177e4
LT
3458/**
3459 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3460 * @dev: Device to which command will be sent
3461 *
780a87f7
JG
3462 * Issue SET FEATURES - XFER MODE command to device @dev
3463 * on port @ap.
3464 *
1da177e4 3465 * LOCKING:
0cba632b 3466 * PCI/etc. bus probe sem.
83206a29
TH
3467 *
3468 * RETURNS:
3469 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3470 */
3471
3373efd8 3472static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3473{
a0123703 3474 struct ata_taskfile tf;
83206a29 3475 unsigned int err_mask;
1da177e4
LT
3476
3477 /* set up set-features taskfile */
3478 DPRINTK("set features - xfer mode\n");
3479
3373efd8 3480 ata_tf_init(dev, &tf);
a0123703
TH
3481 tf.command = ATA_CMD_SET_FEATURES;
3482 tf.feature = SETFEATURES_XFER;
3483 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3484 tf.protocol = ATA_PROT_NODATA;
3485 tf.nsect = dev->xfer_mode;
1da177e4 3486
3373efd8 3487 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3488
83206a29
TH
3489 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3490 return err_mask;
1da177e4
LT
3491}
3492
8bf62ece
AL
3493/**
3494 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3495 * @dev: Device to which command will be sent
e2a7f77a
RD
3496 * @heads: Number of heads (taskfile parameter)
3497 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3498 *
3499 * LOCKING:
6aff8f1f
TH
3500 * Kernel thread context (may sleep)
3501 *
3502 * RETURNS:
3503 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3504 */
3373efd8
TH
3505static unsigned int ata_dev_init_params(struct ata_device *dev,
3506 u16 heads, u16 sectors)
8bf62ece 3507{
a0123703 3508 struct ata_taskfile tf;
6aff8f1f 3509 unsigned int err_mask;
8bf62ece
AL
3510
3511 /* Number of sectors per track 1-255. Number of heads 1-16 */
3512 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3513 return AC_ERR_INVALID;
8bf62ece
AL
3514
3515 /* set up init dev params taskfile */
3516 DPRINTK("init dev params \n");
3517
3373efd8 3518 ata_tf_init(dev, &tf);
a0123703
TH
3519 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3520 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3521 tf.protocol = ATA_PROT_NODATA;
3522 tf.nsect = sectors;
3523 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3524
3373efd8 3525 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3526
6aff8f1f
TH
3527 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3528 return err_mask;
8bf62ece
AL
3529}
3530
1da177e4 3531/**
0cba632b
JG
3532 * ata_sg_clean - Unmap DMA memory associated with command
3533 * @qc: Command containing DMA memory to be released
3534 *
3535 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3536 *
3537 * LOCKING:
cca3974e 3538 * spin_lock_irqsave(host lock)
1da177e4 3539 */
70e6ad0c 3540void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3541{
3542 struct ata_port *ap = qc->ap;
cedc9a47 3543 struct scatterlist *sg = qc->__sg;
1da177e4 3544 int dir = qc->dma_dir;
cedc9a47 3545 void *pad_buf = NULL;
1da177e4 3546
a4631474
TH
3547 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3548 WARN_ON(sg == NULL);
1da177e4
LT
3549
3550 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3551 WARN_ON(qc->n_elem > 1);
1da177e4 3552
2c13b7ce 3553 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3554
cedc9a47
JG
3555 /* if we padded the buffer out to 32-bit bound, and data
3556 * xfer direction is from-device, we must copy from the
3557 * pad buffer back into the supplied buffer
3558 */
3559 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3560 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3561
3562 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3563 if (qc->n_elem)
2f1f610b 3564 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3565 /* restore last sg */
3566 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3567 if (pad_buf) {
3568 struct scatterlist *psg = &qc->pad_sgent;
3569 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3570 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3571 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3572 }
3573 } else {
2e242fa9 3574 if (qc->n_elem)
2f1f610b 3575 dma_unmap_single(ap->dev,
e1410f2d
JG
3576 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
3577 dir);
cedc9a47
JG
3578 /* restore sg */
3579 sg->length += qc->pad_len;
3580 if (pad_buf)
3581 memcpy(qc->buf_virt + sg->length - qc->pad_len,
3582 pad_buf, qc->pad_len);
3583 }
1da177e4
LT
3584
3585 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 3586 qc->__sg = NULL;
1da177e4
LT
3587}

/**
 * ata_fill_sg - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
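
/*
 * Worked example for the 64K boundary split above (illustrative
 * numbers only): a 0x3000-byte segment at bus address 0x0000f000
 * crosses the 64K mark, so it costs two PRD entries:
 *
 *	PRD[0] = (0x0000f000, 0x1000)	- up to the boundary
 *	PRD[1] = (0x00010000, 0x2000)	- the remainder
 */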
/**
 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
 * @qc: Metadata associated with taskfile to check
 *
 * Allow low-level driver to filter ATA PACKET commands, returning
 * a status indicating whether or not it is OK to use DMA for the
 * supplied PACKET command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS: 0 when ATAPI DMA can be used
 *          nonzero otherwise
 */
int ata_check_atapi_dma(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int rc = 0; /* Assume ATAPI DMA is OK by default */

	if (ap->ops->check_atapi_dma)
		rc = ap->ops->check_atapi_dma(qc);

	return rc;
}
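
/*
 * Illustrative sketch, not part of libata: a hypothetical LLD
 * ->check_atapi_dma hook for hardware that cannot DMA odd-length
 * ATAPI transfers.  A nonzero return vetoes DMA and the command is
 * issued by PIO instead.
 */
static int example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* qc->nbytes is the requested transfer length in bytes */
	return qc->nbytes & 1;
}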
/**
 * ata_qc_prep - Prepare taskfile for submission
 * @qc: Metadata associated with taskfile to be prepared
 *
 * Prepare ATA taskfile for submission.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_prep(struct ata_queued_cmd *qc)
{
	if (!(qc->flags & ATA_QCFLAG_DMAMAP))
		return;

	ata_fill_sg(qc);
}

void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }

/**
 * ata_sg_init_one - Associate command with memory buffer
 * @qc: Command to be associated
 * @buf: Memory buffer
 * @buflen: Length of memory buffer, in bytes.
 *
 * Initialize the data-related elements of queued_cmd @qc
 * to point to a single memory buffer, @buf of byte length @buflen.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
{
	qc->flags |= ATA_QCFLAG_SINGLE;

	qc->__sg = &qc->sgent;
	qc->n_elem = 1;
	qc->orig_n_elem = 1;
	qc->buf_virt = buf;
	qc->nbytes = buflen;

	sg_init_one(&qc->sgent, buf, buflen);
}
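
/*
 * Usage sketch (assumed caller context, not a real libata path): a
 * data-in command built around one linear kernel buffer, the way
 * internal commands set themselves up before issue.
 */
static void example_setup_single_buf(struct ata_queued_cmd *qc,
				     void *buf, unsigned int buflen)
{
	ata_sg_init_one(qc, buf, buflen);	/* sets ATA_QCFLAG_SINGLE */
	qc->dma_dir = DMA_FROM_DEVICE;		/* device-to-memory transfer */
}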

/**
 * ata_sg_init - Associate command with scatter-gather table.
 * @qc: Command to be associated
 * @sg: Scatter-gather table.
 * @n_elem: Number of elements in s/g table.
 *
 * Initialize the data-related elements of queued_cmd @qc
 * to point to a scatter-gather table @sg, containing @n_elem
 * elements.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */

void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->flags |= ATA_QCFLAG_SG;
	qc->__sg = sg;
	qc->n_elem = n_elem;
	qc->orig_n_elem = n_elem;
}

/**
 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 * @qc: Command with memory buffer to be mapped.
 *
 * DMA-map the memory buffer associated with queued_cmd @qc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}

/**
 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 * @qc: Command with scatter-gather table to be mapped.
 *
 * DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}

/**
 * swap_buf_le16 - swap halves of 16-bit words in place
 * @buf: Buffer to swap
 * @buf_words: Number of 16-bit words in buffer.
 *
 * Swap halves of 16-bit words if needed to convert from
 * little-endian byte order to native cpu byte order, or
 * vice-versa.
 *
 * LOCKING:
 * Inherited from caller.
 */
void swap_buf_le16(u16 *buf, unsigned int buf_words)
{
#ifdef __BIG_ENDIAN
	unsigned int i;

	for (i = 0; i < buf_words; i++)
		buf[i] = le16_to_cpu(buf[i]);
#endif /* __BIG_ENDIAN */
}
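
/*
 * Usage sketch: IDENTIFY DEVICE data arrives as 256 little-endian
 * 16-bit words, so callers fix it up before interpreting u16 fields.
 * Compiles to nothing on little-endian machines.
 */
static void example_fixup_identify(u16 *id)
{
	swap_buf_le16(id, ATA_ID_WORDS);	/* ATA_ID_WORDS == 256 */
}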
3907
6ae4cfb5 3908/**
0d5ff566 3909 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 3910 * @adev: device to target
6ae4cfb5
AL
3911 * @buf: data buffer
3912 * @buflen: buffer length
344babaa 3913 * @write_data: read/write
6ae4cfb5
AL
3914 *
3915 * Transfer data from/to the device data register by PIO.
3916 *
3917 * LOCKING:
3918 * Inherited from caller.
6ae4cfb5 3919 */
0d5ff566
TH
3920void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
3921 unsigned int buflen, int write_data)
1da177e4 3922{
a6b2c5d4 3923 struct ata_port *ap = adev->ap;
6ae4cfb5 3924 unsigned int words = buflen >> 1;
1da177e4 3925
6ae4cfb5 3926 /* Transfer multiple of 2 bytes */
1da177e4 3927 if (write_data)
0d5ff566 3928 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 3929 else
0d5ff566 3930 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
3931
3932 /* Transfer trailing 1 byte, if any. */
3933 if (unlikely(buflen & 0x01)) {
3934 u16 align_buf[1] = { 0 };
3935 unsigned char *trailing_buf = buf + buflen - 1;
3936
3937 if (write_data) {
3938 memcpy(align_buf, trailing_buf, 1);
0d5ff566 3939 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 3940 } else {
0d5ff566 3941 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
3942 memcpy(trailing_buf, align_buf, 1);
3943 }
3944 }
1da177e4
LT
3945}
3946
75e99585 3947/**
0d5ff566 3948 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
3949 * @adev: device to target
3950 * @buf: data buffer
3951 * @buflen: buffer length
3952 * @write_data: read/write
3953 *
88574551 3954 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
3955 * transfer with interrupts disabled.
3956 *
3957 * LOCKING:
3958 * Inherited from caller.
3959 */
0d5ff566
TH
3960void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
3961 unsigned int buflen, int write_data)
75e99585
AC
3962{
3963 unsigned long flags;
3964 local_irq_save(flags);
0d5ff566 3965 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
3966 local_irq_restore(flags);
3967}
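
/*
 * Sketch of how an LLD selects between the two helpers above when
 * filling its ata_port_operations (hypothetical driver; all other
 * mandatory hooks elided).  Ports whose data register cannot tolerate
 * an interrupt mid-word use the _noirq variant.
 */
static struct ata_port_operations example_pio_ops = {
	.data_xfer	= ata_data_xfer_noirq,
	/* ... remaining hooks ... */
};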


/**
 * ata_pio_sector - Transfer ATA_SECT_SIZE (512 bytes) of data.
 * @qc: Command on going
 *
 * Transfer ATA_SECT_SIZE of data from/to the ATA device.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	if (qc->curbytes == qc->nbytes - ATA_SECT_SIZE)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, ATA_SECT_SIZE, do_write);
	}

	qc->curbytes += ATA_SECT_SIZE;
	qc->cursg_ofs += ATA_SECT_SIZE;

	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}

/**
 * ata_pio_sectors - Transfer one or many 512-byte sectors.
 * @qc: Command on going
 *
 * Transfer one or many ATA_SECT_SIZE of data from/to the
 * ATA device for the DRQ request.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		nsect = min((qc->nbytes - qc->curbytes) / ATA_SECT_SIZE,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);
}
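
/*
 * Worked example for the min() above (illustrative numbers): with
 * dev->multi_count == 8 and 6144 bytes (12 sectors) left in the
 * command, one DRQ block moves min(12, 8) = 8 sectors and the
 * remaining 4 go out with the next DRQ block.
 */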
4053
c71c1857
AL
4054/**
4055 * atapi_send_cdb - Write CDB bytes to hardware
4056 * @ap: Port to which ATAPI device is attached.
4057 * @qc: Taskfile currently active
4058 *
4059 * When device has indicated its readiness to accept
4060 * a CDB, this function is called. Send the CDB.
4061 *
4062 * LOCKING:
4063 * caller.
4064 */
4065
4066static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4067{
4068 /* send SCSI cdb */
4069 DPRINTK("send cdb\n");
db024d53 4070 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4071
a6b2c5d4 4072 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4073 ata_altstatus(ap); /* flush */
4074
4075 switch (qc->tf.protocol) {
4076 case ATA_PROT_ATAPI:
4077 ap->hsm_task_state = HSM_ST;
4078 break;
4079 case ATA_PROT_ATAPI_NODATA:
4080 ap->hsm_task_state = HSM_ST_LAST;
4081 break;
4082 case ATA_PROT_ATAPI_DMA:
4083 ap->hsm_task_state = HSM_ST_LAST;
4084 /* initiate bmdma */
4085 ap->ops->bmdma_start(qc);
4086 break;
4087 }
1da177e4
LT
4088}

/**
 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 * @bytes: number of bytes
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, pad zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}

/**
 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
 * @qc: Command on going
 *
 * Transfer data from/to the ATAPI device.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}

/**
 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
 * @ap: the target ata_port
 * @qc: qc on going
 *
 * RETURNS:
 * 1 if ok in workqueue, 0 otherwise.
 */

static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	if (qc->tf.flags & ATA_TFLAG_POLLING)
		return 1;

	if (ap->hsm_task_state == HSM_ST_FIRST) {
		if (qc->tf.protocol == ATA_PROT_PIO &&
		    (qc->tf.flags & ATA_TFLAG_WRITE))
			return 1;

		if (is_atapi_taskfile(&qc->tf) &&
		    !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			return 1;
	}

	return 0;
}

/**
 * ata_hsm_qc_complete - finish a qc running on standard HSM
 * @qc: Command to complete
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * Finish @qc which is running on standard HSM.
 *
 * LOCKING:
 * If @in_wq is zero, spin_lock_irqsave(host lock).
 * Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}

	ata_altstatus(ap); /* flush */
}

/**
 * ata_hsm_move - move the HSM to the next state.
 * @ap: the target ata_port
 * @qc: qc on going
 * @status: current device status
 * @in_wq: 1 if called from workqueue, 0 otherwise
 *
 * RETURNS:
 * 1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
			ata_altstatus(ap); /* flush */
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					ata_altstatus(ap);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				ata_altstatus(ap);
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		ata_altstatus(ap); /* flush */
		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
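
/*
 * Quick reference for the states driven above (summary of the code,
 * not new behaviour):
 *
 *	HSM_ST_FIRST	send ATAPI CDB or first PIO-out data block
 *	HSM_ST		inside a PIO data transfer
 *	HSM_ST_LAST	data done, waiting to check final status
 *	HSM_ST_ERR	qc->err_mask set, complete/freeze via EH
 *	HSM_ST_IDLE	no command in progress
 */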

static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}

/**
 * ata_qc_new - Request an available ATA command, for queueing
 * @ap: Port associated with the command
 *
 * LOCKING:
 * None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}

/**
 * ata_qc_new_init - Request an available ATA command, and initialize it
 * @dev: Device from whom we request an available command structure
 *
 * LOCKING:
 * None.
 */

struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	struct ata_queued_cmd *qc;

	qc = ata_qc_new(ap);
	if (qc) {
		qc->scsicmd = NULL;
		qc->ap = ap;
		qc->dev = dev;

		ata_qc_reinit(qc);
	}

	return qc;
}

/**
 * ata_qc_free - free unused ata_queued_cmd
 * @qc: Command to complete
 *
 * Designed to free unused ata_queued_cmd object
 * in case something prevents using it.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
4670
76014427 4671void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 4672{
dedaf2b0
TH
4673 struct ata_port *ap = qc->ap;
4674
a4631474
TH
4675 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
4676 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
4677
4678 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
4679 ata_sg_clean(qc);
4680
7401abf2 4681 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
4682 if (qc->tf.protocol == ATA_PROT_NCQ)
4683 ap->sactive &= ~(1 << qc->tag);
4684 else
4685 ap->active_tag = ATA_TAG_POISON;
7401abf2 4686
3f3791d3
AL
4687 /* atapi: mark qc as inactive to prevent the interrupt handler
4688 * from completing the command twice later, before the error handler
4689 * is called. (when rc != 0 and atapi request sense is needed)
4690 */
4691 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 4692 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 4693
1da177e4 4694 /* call completion callback */
77853bf2 4695 qc->complete_fn(qc);
1da177e4
LT
4696}
4697
39599a53
TH
4698static void fill_result_tf(struct ata_queued_cmd *qc)
4699{
4700 struct ata_port *ap = qc->ap;
4701
4702 ap->ops->tf_read(ap, &qc->result_tf);
4703 qc->result_tf.flags = qc->tf.flags;
4704}

/**
 * ata_qc_complete - Complete an active ATA command
 * @qc: Command to complete
 *
 * Indicate to the mid and upper layers that an ATA
 * command has completed, with either an ok or not-ok status.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}

/**
 * ata_qc_complete_multiple - Complete multiple qcs successfully
 * @ap: port in question
 * @qc_active: new qc_active mask
 * @finish_qc: LLDD callback invoked before completing a qc
 *
 * Complete in-flight commands. This function is meant to be
 * called from low-level driver's interrupt routine to complete
 * requests normally. ap->qc_active and @qc_active are compared
 * and commands are completed accordingly.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	done_mask = ap->qc_active ^ qc_active;

	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
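
/*
 * Usage sketch (hypothetical NCQ-capable LLD): read the controller's
 * notion of still-active tags and let ata_qc_complete_multiple()
 * finish every command that dropped out of the mask.
 */

/* stand-in for a controller-specific register read (hypothetical) */
static u32 example_read_active_tags(struct ata_port *ap);

static void example_ncq_host_intr(struct ata_port *ap)
{
	u32 qc_active = example_read_active_tags(ap);

	if (ata_qc_complete_multiple(ap, qc_active, NULL) < 0)
		ata_port_freeze(ap);	/* illegal transition: let EH recover */
}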
4814
1da177e4
LT
4815static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
4816{
4817 struct ata_port *ap = qc->ap;
4818
4819 switch (qc->tf.protocol) {
3dc1d881 4820 case ATA_PROT_NCQ:
1da177e4
LT
4821 case ATA_PROT_DMA:
4822 case ATA_PROT_ATAPI_DMA:
4823 return 1;
4824
4825 case ATA_PROT_ATAPI:
4826 case ATA_PROT_PIO:
1da177e4
LT
4827 if (ap->flags & ATA_FLAG_PIO_DMA)
4828 return 1;
4829
4830 /* fall through */
4831
4832 default:
4833 return 0;
4834 }
4835
4836 /* never reached */
4837}

/**
 * ata_qc_issue - issue taskfile to device
 * @qc: command to issue to device
 *
 * Prepare an ATA command for submission to device.
 * This includes mapping the data into a DMA-able
 * area, filling in the S/G table, and finally
 * writing the taskfile to hardware, starting the command.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Make sure only one non-NCQ command is outstanding. The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(ap->sactive & (1 << qc->tag));
		ap->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(ap->sactive);
		ap->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}

/**
 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 * @qc: command to issue to device
 *
 * Using various libata functions and hooks, this function
 * starts an ATA command. ATA commands are grouped into
 * classes called "protocols", and issuing each type of protocol
 * is slightly different.
 *
 * May be used as the qc_issue() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* Some controllers show flaky interrupt behavior after
	 * setting xfer mode. Use polling instead.
	 */
	if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
		     qc->tf.feature == SETFEATURES_XFER) &&
	    (ap->flags & ATA_FLAG_SETXFER_POLLING))
		qc->tf.flags |= ATA_TFLAG_POLLING;

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}

/**
 * ata_host_intr - Handle host interrupt for given (port, task)
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle host interrupt for given queued command. Currently,
 * only DMA interrupts are handled. All other commands are
 * handled via polling with interrupts disabled (nIEN bit).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

/**
 * ata_interrupt - Default ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices. Calls
 * ata_host_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt (int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
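
/*
 * Sketch: a PCI LLD that relies on the default handler typically
 * wires it up against the (possibly shared) interrupt line during
 * host activation.  Error unwinding is elided and "example_lld" is a
 * placeholder name.
 */
static int example_attach_irq(struct ata_host *host, unsigned int irq)
{
	return request_irq(irq, ata_interrupt, IRQF_SHARED,
			   "example_lld", host);
}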
5183
34bf2170
TH
5184/**
5185 * sata_scr_valid - test whether SCRs are accessible
5186 * @ap: ATA port to test SCR accessibility for
5187 *
5188 * Test whether SCRs are accessible for @ap.
5189 *
5190 * LOCKING:
5191 * None.
5192 *
5193 * RETURNS:
5194 * 1 if SCRs are accessible, 0 otherwise.
5195 */
5196int sata_scr_valid(struct ata_port *ap)
5197{
5198 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5199}
5200
5201/**
5202 * sata_scr_read - read SCR register of the specified port
5203 * @ap: ATA port to read SCR for
5204 * @reg: SCR to read
5205 * @val: Place to store read value
5206 *
5207 * Read SCR register @reg of @ap into *@val. This function is
5208 * guaranteed to succeed if the cable type of the port is SATA
5209 * and the port implements ->scr_read.
5210 *
5211 * LOCKING:
5212 * None.
5213 *
5214 * RETURNS:
5215 * 0 on success, negative errno on failure.
5216 */
5217int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5218{
5219 if (sata_scr_valid(ap)) {
5220 *val = ap->ops->scr_read(ap, reg);
5221 return 0;
5222 }
5223 return -EOPNOTSUPP;
5224}
5225
5226/**
5227 * sata_scr_write - write SCR register of the specified port
5228 * @ap: ATA port to write SCR for
5229 * @reg: SCR to write
5230 * @val: value to write
5231 *
5232 * Write @val to SCR register @reg of @ap. This function is
5233 * guaranteed to succeed if the cable type of the port is SATA
5234 * and the port implements ->scr_read.
5235 *
5236 * LOCKING:
5237 * None.
5238 *
5239 * RETURNS:
5240 * 0 on success, negative errno on failure.
5241 */
5242int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5243{
5244 if (sata_scr_valid(ap)) {
5245 ap->ops->scr_write(ap, reg, val);
5246 return 0;
5247 }
5248 return -EOPNOTSUPP;
5249}
5250
5251/**
5252 * sata_scr_write_flush - write SCR register of the specified port and flush
5253 * @ap: ATA port to write SCR for
5254 * @reg: SCR to write
5255 * @val: value to write
5256 *
5257 * This function is identical to sata_scr_write() except that this
5258 * function performs flush after writing to the register.
5259 *
5260 * LOCKING:
5261 * None.
5262 *
5263 * RETURNS:
5264 * 0 on success, negative errno on failure.
5265 */
5266int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5267{
5268 if (sata_scr_valid(ap)) {
5269 ap->ops->scr_write(ap, reg, val);
5270 ap->ops->scr_read(ap, reg);
5271 return 0;
5272 }
5273 return -EOPNOTSUPP;
5274}
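
/*
 * Usage sketch: pull the negotiated link speed out of SStatus.  The
 * early return mirrors how callers in this file treat ports without
 * SCR access.
 */
static unsigned int example_link_speed(struct ata_port *ap)
{
	u32 sstatus;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return 0;		/* no SCRs: PATA or no ->scr_read */
	return (sstatus >> 4) & 0xf;	/* SPD: 1 = 1.5 Gbps, 2 = 3.0 Gbps */
}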

/**
 * ata_port_online - test whether the given port is online
 * @ap: ATA port to test
 *
 * Test whether @ap is online. Note that this function returns 0
 * if online status of @ap cannot be obtained, so
 * ata_port_online(ap) != !ata_port_offline(ap).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if the port online status is available and online.
 */
int ata_port_online(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 * ata_port_offline - test whether the given port is offline
 * @ap: ATA port to test
 *
 * Test whether @ap is offline. Note that this function returns
 * 0 if offline status of @ap cannot be obtained, so
 * ata_port_online(ap) != !ata_port_offline(ap).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if the port offline status is available and offline.
 */
int ata_port_offline(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}

int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}

static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress. Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 * ata_host_suspend - suspend host
 * @host: host to suspend
 * @mesg: PM message
 *
 * Suspend @host. Actual operation is performed by EH. This
 * function requests EH to perform PM operations and waits for EH
 * to finish.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int i, j, rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc)
		goto fail;

	/* EH is quiescent now. Fail if we have any ready device.
	 * This happens if hotplug occurs between completion of device
	 * suspension and here.
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		for (j = 0; j < ATA_MAX_DEVICES; j++) {
			struct ata_device *dev = &ap->device[j];

			if (ata_dev_ready(dev)) {
				ata_port_printk(ap, KERN_WARNING,
						"suspend failed, device %d "
						"still active\n", dev->devno);
				rc = -EBUSY;
				goto fail;
			}
		}
	}

	host->dev->power.power_state = mesg;
	return 0;

 fail:
	ata_host_resume(host);
	return rc;
}

/**
 * ata_host_resume - resume host
 * @host: host to resume
 *
 * Resume @host. Actual operation is performed by EH. This
 * function requests EH to perform PM operations and returns.
 * Note that all resume operations are performed in parallel.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
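
/*
 * Sketch of a PCI LLD suspend method layered on ata_host_suspend()
 * (hypothetical; a real driver would normally go through
 * ata_pci_device_suspend(), which also saves PCI state).
 */
static int example_pci_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);

	return ata_host_suspend(host, mesg);
}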

/**
 * ata_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized. Allocates space for PRD table.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
5490
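/* Illustrative sketch ("foo" names are hypothetical): an LLD that needs
 * per-port private state typically chains to ata_port_start() for the PRD
 * table and hangs its own devres-managed allocation off ap->private_data.
 */
static int foo_port_start(struct ata_port *ap)
{
	struct foo_port_priv *pp;		/* hypothetical private struct */
	int rc;

	rc = ata_port_start(ap);		/* allocates ap->prd */
	if (rc)
		return rc;

	pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
	if (!pp)
		return -ENOMEM;

	ap->private_data = pp;
	return 0;
}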
3ef3b43d
TH
5491/**
5492 * ata_dev_init - Initialize an ata_device structure
5493 * @dev: Device structure to initialize
5494 *
5495 * Initialize @dev in preparation for probing.
5496 *
5497 * LOCKING:
5498 * Inherited from caller.
5499 */
5500void ata_dev_init(struct ata_device *dev)
5501{
5502 struct ata_port *ap = dev->ap;
72fa4b74
TH
5503 unsigned long flags;
5504
5a04bf4b
TH
5505 /* SATA spd limit is bound to the first device */
5506 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5507
72fa4b74
TH
5508 /* High bits of dev->flags are used to record warm plug
5509 * requests which occur asynchronously. Synchronize using
cca3974e 5510 * host lock.
72fa4b74 5511 */
ba6a1308 5512 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5513 dev->flags &= ~ATA_DFLAG_INIT_MASK;
ba6a1308 5514 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5515
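	/* Fields in front of ATA_DEVICE_CLEAR_OFFSET (->ap, ->devno and
	 * the ->flags bits preserved above) survive re-initialization;
	 * everything from the offset to the end of the structure is
	 * probing state and is wiped here.
	 */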
72fa4b74
TH
5516 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5517 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5518 dev->pio_mask = UINT_MAX;
5519 dev->mwdma_mask = UINT_MAX;
5520 dev->udma_mask = UINT_MAX;
5521}
5522
1da177e4 5523/**
155a8a9c 5524 * ata_port_init - Initialize an ata_port structure
1da177e4 5525 * @ap: Structure to initialize
cca3974e 5526  *	@host: Collection of ports to which @ap belongs
1da177e4
LT
5527 * @ent: Probe information provided by low-level driver
5528 * @port_no: Port number associated with this ata_port
5529 *
155a8a9c 5530 * Initialize a new ata_port structure.
0cba632b 5531 *
1da177e4 5532 * LOCKING:
0cba632b 5533 * Inherited from caller.
1da177e4 5534 */
cca3974e 5535void ata_port_init(struct ata_port *ap, struct ata_host *host,
155a8a9c 5536 const struct ata_probe_ent *ent, unsigned int port_no)
1da177e4
LT
5537{
5538 unsigned int i;
5539
cca3974e 5540 ap->lock = &host->lock;
198e0fed 5541 ap->flags = ATA_FLAG_DISABLED;
44877b4e 5542 ap->print_id = ata_print_id++;
1da177e4 5543 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5544 ap->host = host;
2f1f610b 5545 ap->dev = ent->dev;
1da177e4 5546 ap->port_no = port_no;
fea63e38
TH
5547 if (port_no == 1 && ent->pinfo2) {
5548 ap->pio_mask = ent->pinfo2->pio_mask;
5549 ap->mwdma_mask = ent->pinfo2->mwdma_mask;
5550 ap->udma_mask = ent->pinfo2->udma_mask;
5551 ap->flags |= ent->pinfo2->flags;
5552 ap->ops = ent->pinfo2->port_ops;
5553 } else {
5554 ap->pio_mask = ent->pio_mask;
5555 ap->mwdma_mask = ent->mwdma_mask;
5556 ap->udma_mask = ent->udma_mask;
5557 ap->flags |= ent->port_flags;
5558 ap->ops = ent->port_ops;
5559 }
5a04bf4b 5560 ap->hw_sata_spd_limit = UINT_MAX;
1da177e4
LT
5561 ap->active_tag = ATA_TAG_POISON;
5562 ap->last_ctl = 0xFF;
bd5d825c
BP
5563
5564#if defined(ATA_VERBOSE_DEBUG)
5565 /* turn on all debugging levels */
5566 ap->msg_enable = 0x00FF;
5567#elif defined(ATA_DEBUG)
5568 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5569#else
0dd4b21f 5570 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5571#endif
1da177e4 5572
65f27f38
DH
5573 INIT_DELAYED_WORK(&ap->port_task, NULL);
5574 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5575 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5576 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5577 init_waitqueue_head(&ap->eh_wait_q);
1da177e4 5578
838df628
TH
5579 /* set cable type */
5580 ap->cbl = ATA_CBL_NONE;
5581 if (ap->flags & ATA_FLAG_SATA)
5582 ap->cbl = ATA_CBL_SATA;
5583
acf356b1
TH
5584 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5585 struct ata_device *dev = &ap->device[i];
38d87234 5586 dev->ap = ap;
72fa4b74 5587 dev->devno = i;
3ef3b43d 5588 ata_dev_init(dev);
acf356b1 5589 }
1da177e4
LT
5590
5591#ifdef ATA_IRQ_TRAP
5592 ap->stats.unhandled_irq = 1;
5593 ap->stats.idle_irq = 1;
5594#endif
5595
5596 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
5597}
5598
155a8a9c 5599/**
4608c160
TH
5600 * ata_port_init_shost - Initialize SCSI host associated with ATA port
5601 * @ap: ATA port to initialize SCSI host for
5602 * @shost: SCSI host associated with @ap
155a8a9c 5603 *
4608c160 5604 * Initialize SCSI host @shost associated with ATA port @ap.
155a8a9c
BK
5605 *
5606 * LOCKING:
5607 * Inherited from caller.
5608 */
4608c160 5609static void ata_port_init_shost(struct ata_port *ap, struct Scsi_Host *shost)
155a8a9c 5610{
cca3974e 5611 ap->scsi_host = shost;
155a8a9c 5612
44877b4e 5613 shost->unique_id = ap->print_id;
4608c160
TH
5614 shost->max_id = 16;
5615 shost->max_lun = 1;
5616 shost->max_channel = 1;
5617 shost->max_cmd_len = 12;
155a8a9c
BK
5618}
5619
1da177e4 5620/**
996139f1 5621 * ata_port_add - Attach low-level ATA driver to system
1da177e4 5622 * @ent: Information provided by low-level driver
cca3974e 5623  *	@host: Collection of ports to which we add
1da177e4
LT
 5624  *	@port_no: Port number associated with this ata_port
5625 *
0cba632b
JG
5626 * Attach low-level ATA driver to system.
5627 *
1da177e4 5628 * LOCKING:
0cba632b 5629 * PCI/etc. bus probe sem.
1da177e4
LT
5630 *
5631 * RETURNS:
0cba632b 5632  *	New ata_port on success, NULL on error.
1da177e4 5633 */
996139f1 5634static struct ata_port * ata_port_add(const struct ata_probe_ent *ent,
cca3974e 5635 struct ata_host *host,
1da177e4
LT
5636 unsigned int port_no)
5637{
996139f1 5638 struct Scsi_Host *shost;
1da177e4 5639 struct ata_port *ap;
1da177e4
LT
5640
5641 DPRINTK("ENTER\n");
aec5c3c1 5642
52783c5d 5643 if (!ent->port_ops->error_handler &&
cca3974e 5644 !(ent->port_flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST))) {
aec5c3c1
TH
5645 printk(KERN_ERR "ata%u: no reset mechanism available\n",
5646 port_no);
5647 return NULL;
5648 }
5649
996139f1
JG
5650 shost = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
5651 if (!shost)
1da177e4
LT
5652 return NULL;
5653
996139f1 5654 shost->transportt = &ata_scsi_transport_template;
30afc84c 5655
996139f1 5656 ap = ata_shost_to_port(shost);
1da177e4 5657
cca3974e 5658 ata_port_init(ap, host, ent, port_no);
996139f1 5659 ata_port_init_shost(ap, shost);
1da177e4 5660
1da177e4 5661 return ap;
1da177e4
LT
5662}
5663
f0d36efd
TH
5664static void ata_host_release(struct device *gendev, void *res)
5665{
5666 struct ata_host *host = dev_get_drvdata(gendev);
5667 int i;
5668
5669 for (i = 0; i < host->n_ports; i++) {
5670 struct ata_port *ap = host->ports[i];
5671
5672 if (!ap)
5673 continue;
5674
5675 if (ap->ops->port_stop)
5676 ap->ops->port_stop(ap);
5677
5678 scsi_host_put(ap->scsi_host);
5679 }
5680
5681 if (host->ops->host_stop)
5682 host->ops->host_stop(host);
1aa56cca
TH
5683
5684 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
5685}
5686
b03732f0 5687/**
cca3974e
JG
 5688  *	ata_host_init - Initialize a host struct
5689 * @host: host to initialize
5690 * @dev: device host is attached to
5691 * @flags: host flags
5692 * @ops: port_ops
b03732f0
BK
5693 *
5694 * LOCKING:
5695 * PCI/etc. bus probe sem.
5696 *
5697 */
5698
cca3974e
JG
5699void ata_host_init(struct ata_host *host, struct device *dev,
5700 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 5701{
cca3974e
JG
5702 spin_lock_init(&host->lock);
5703 host->dev = dev;
5704 host->flags = flags;
5705 host->ops = ops;
b03732f0
BK
5706}
5707
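/* Illustrative sketch ("foo" names are hypothetical): hosts that are not
 * registered through ata_device_add() -- the SAS glue is the typical case --
 * call ata_host_init() directly on an embedded ata_host and then set their
 * ports up by hand.
 */
static void foo_sas_host_init(struct foo_hba *hba)
{
	ata_host_init(&hba->ata_host, hba->dev,
		      ATA_HOST_SIMPLEX,		/* flags, illustrative */
		      &foo_sas_port_ops);	/* hypothetical port_ops */
}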
1da177e4 5708/**
0cba632b
JG
5709 * ata_device_add - Register hardware device with ATA and SCSI layers
5710 * @ent: Probe information describing hardware device to be registered
5711 *
5712 * This function processes the information provided in the probe
5713 * information struct @ent, allocates the necessary ATA and SCSI
5714 * host information structures, initializes them, and registers
5715 * everything with requisite kernel subsystems.
5716 *
5717 * This function requests irqs, probes the ATA bus, and probes
5718 * the SCSI bus.
1da177e4
LT
5719 *
5720 * LOCKING:
0cba632b 5721 * PCI/etc. bus probe sem.
1da177e4
LT
5722 *
5723 * RETURNS:
0cba632b 5724 * Number of ports registered. Zero on error (no ports registered).
1da177e4 5725 */
057ace5e 5726int ata_device_add(const struct ata_probe_ent *ent)
1da177e4 5727{
6d0500df 5728 unsigned int i;
1da177e4 5729 struct device *dev = ent->dev;
cca3974e 5730 struct ata_host *host;
39b07ce6 5731 	int rc = 0;	/* err_out can be reached before the first assignment */
1da177e4
LT
5732
5733 DPRINTK("ENTER\n");
f20b16ff 5734
02f076aa
AC
5735 if (ent->irq == 0) {
5736 dev_printk(KERN_ERR, dev, "is not available: No interrupt assigned.\n");
5737 return 0;
5738 }
f0d36efd
TH
5739
5740 if (!devres_open_group(dev, ata_device_add, GFP_KERNEL))
5741 return 0;
5742
1da177e4 5743 /* alloc a container for our list of ATA ports (buses) */
f0d36efd
TH
5744 host = devres_alloc(ata_host_release, sizeof(struct ata_host) +
5745 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
cca3974e 5746 if (!host)
f0d36efd
TH
5747 goto err_out;
5748 devres_add(dev, host);
5749 dev_set_drvdata(dev, host);
1da177e4 5750
cca3974e
JG
5751 ata_host_init(host, dev, ent->_host_flags, ent->port_ops);
5752 host->n_ports = ent->n_ports;
5753 host->irq = ent->irq;
5754 host->irq2 = ent->irq2;
0d5ff566 5755 host->iomap = ent->iomap;
cca3974e 5756 host->private_data = ent->private_data;
1da177e4
LT
5757
5758 /* register each port bound to this device */
cca3974e 5759 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5760 struct ata_port *ap;
5761 unsigned long xfer_mode_mask;
2ec7df04 5762 int irq_line = ent->irq;
1da177e4 5763
cca3974e 5764 ap = ata_port_add(ent, host, i);
c38778c3 5765 host->ports[i] = ap;
1da177e4
LT
5766 if (!ap)
5767 goto err_out;
5768
dd5b06c4
TH
5769 /* dummy? */
5770 if (ent->dummy_port_mask & (1 << i)) {
5771 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5772 ap->ops = &ata_dummy_port_ops;
5773 continue;
5774 }
5775
5776 /* start port */
5777 rc = ap->ops->port_start(ap);
5778 if (rc) {
cca3974e
JG
5779 host->ports[i] = NULL;
5780 scsi_host_put(ap->scsi_host);
dd5b06c4
TH
5781 goto err_out;
5782 }
5783
2ec7df04
AC
 5784 		/* Report the secondary IRQ for the second channel in legacy mode */
5785 if (i == 1 && ent->irq2)
5786 irq_line = ent->irq2;
5787
1da177e4
LT
 5788 		xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
5789 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
5790 (ap->pio_mask << ATA_SHIFT_PIO);
5791
5792 /* print per-port info to dmesg */
0d5ff566
TH
5793 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
5794 "ctl 0x%p bmdma 0x%p irq %d\n",
f15a1daf
TH
5795 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
5796 ata_mode_string(xfer_mode_mask),
5797 ap->ioaddr.cmd_addr,
5798 ap->ioaddr.ctl_addr,
5799 ap->ioaddr.bmdma_addr,
2ec7df04 5800 irq_line);
1da177e4 5801
0f0a3ad3
TH
5802 /* freeze port before requesting IRQ */
5803 ata_eh_freeze_port(ap);
1da177e4
LT
5804 }
5805
2ec7df04 5806 	/* obtain irq, which may be shared between channels */
f0d36efd
TH
5807 rc = devm_request_irq(dev, ent->irq, ent->port_ops->irq_handler,
5808 ent->irq_flags, DRV_NAME, host);
39b07ce6
JG
5809 if (rc) {
5810 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5811 ent->irq, rc);
1da177e4 5812 goto err_out;
39b07ce6 5813 }
1da177e4 5814
2ec7df04
AC
 5815 	/* do we have a second IRQ for the other channel, e.g. legacy mode */
 5816 	if (ent->irq2) {
 5817 		/* Identical IRQ numbers here would cause weird core code
 5818 		   crashes later, so trap it now */
5819 BUG_ON(ent->irq == ent->irq2);
5820
f0d36efd
TH
5821 rc = devm_request_irq(dev, ent->irq2,
5822 ent->port_ops->irq_handler, ent->irq_flags,
5823 DRV_NAME, host);
2ec7df04
AC
5824 if (rc) {
5825 dev_printk(KERN_ERR, dev, "irq %lu request failed: %d\n",
5826 ent->irq2, rc);
f0d36efd 5827 goto err_out;
2ec7df04
AC
5828 }
5829 }
5830
f0d36efd 5831 /* resource acquisition complete */
b878ca5d 5832 devres_remove_group(dev, ata_device_add);
f0d36efd 5833
1da177e4
LT
5834 /* perform each probe synchronously */
5835 DPRINTK("probe begin\n");
cca3974e
JG
5836 for (i = 0; i < host->n_ports; i++) {
5837 struct ata_port *ap = host->ports[i];
5a04bf4b 5838 u32 scontrol;
1da177e4
LT
5839 int rc;
5840
5a04bf4b
TH
5841 /* init sata_spd_limit to the current value */
5842 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
5843 int spd = (scontrol >> 4) & 0xf;
5844 ap->hw_sata_spd_limit &= (1 << spd) - 1;
5845 }
5846 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5847
cca3974e 5848 rc = scsi_add_host(ap->scsi_host, dev);
1da177e4 5849 if (rc) {
f15a1daf 5850 ata_port_printk(ap, KERN_ERR, "scsi_add_host failed\n");
1da177e4
LT
5851 /* FIXME: do something useful here */
5852 /* FIXME: handle unconditional calls to
5853 * scsi_scan_host and ata_host_remove, below,
5854 * at the very least
5855 */
5856 }
3e706399 5857
52783c5d 5858 if (ap->ops->error_handler) {
1cdaf534 5859 struct ata_eh_info *ehi = &ap->eh_info;
3e706399
TH
5860 unsigned long flags;
5861
5862 ata_port_probe(ap);
5863
5864 /* kick EH for boot probing */
ba6a1308 5865 spin_lock_irqsave(ap->lock, flags);
3e706399 5866
1cdaf534
TH
5867 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
5868 ehi->action |= ATA_EH_SOFTRESET;
5869 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
3e706399 5870
b51e9e5d 5871 ap->pflags |= ATA_PFLAG_LOADING;
3e706399
TH
5872 ata_port_schedule_eh(ap);
5873
ba6a1308 5874 spin_unlock_irqrestore(ap->lock, flags);
3e706399
TH
5875
5876 /* wait for EH to finish */
5877 ata_port_wait_eh(ap);
5878 } else {
44877b4e 5879 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
3e706399 5880 rc = ata_bus_probe(ap);
44877b4e 5881 DPRINTK("ata%u: bus probe end\n", ap->print_id);
3e706399
TH
5882
5883 if (rc) {
5884 /* FIXME: do something useful here?
5885 * Current libata behavior will
5886 * tear down everything when
5887 * the module is removed
5888 * or the h/w is unplugged.
5889 */
5890 }
5891 }
1da177e4
LT
5892 }
5893
5894 /* probes are done, now scan each port's disk(s) */
c893a3ae 5895 DPRINTK("host probe begin\n");
cca3974e
JG
5896 for (i = 0; i < host->n_ports; i++) {
5897 struct ata_port *ap = host->ports[i];
1da177e4 5898
644dd0cc 5899 ata_scsi_scan_host(ap);
1da177e4
LT
5900 }
5901
1da177e4
LT
5902 VPRINTK("EXIT, returning %u\n", ent->n_ports);
5903 return ent->n_ports; /* success */
5904
f0d36efd
TH
5905 err_out:
5906 devres_release_group(dev, ata_device_add);
f0d36efd 5907 VPRINTK("EXIT, returning %d\n", rc);
1da177e4
LT
5908 return 0;
5909}
5910
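/* Illustrative sketch ("foo" names are hypothetical): the classic probe
 * sequence for a single-channel PCI LLD builds an ata_probe_ent (see
 * ata_probe_ent_alloc() below), points the taskfile at the iomapped BARs
 * and hands the lot to ata_device_add().  BARs are assumed to have been
 * mapped with pcim_iomap_regions() beforehand.
 */
static int foo_init_one(struct pci_dev *pdev, const struct ata_port_info *pi)
{
	struct ata_probe_ent *probe_ent;

	probe_ent = ata_probe_ent_alloc(&pdev->dev, pi);
	if (!probe_ent)
		return -ENOMEM;

	probe_ent->n_ports = 1;
	probe_ent->irq = pdev->irq;
	probe_ent->irq_flags = IRQF_SHARED;
	probe_ent->iomap = pcim_iomap_table(pdev);

	probe_ent->port[0].cmd_addr = probe_ent->iomap[0];
	probe_ent->port[0].altstatus_addr =
	probe_ent->port[0].ctl_addr = probe_ent->iomap[1] + 2;
	ata_std_ports(&probe_ent->port[0]);

	if (!ata_device_add(probe_ent))
		return -ENODEV;			/* zero ports registered */

	devm_kfree(&pdev->dev, probe_ent);	/* no longer needed */
	return 0;
}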
720ba126
TH
5911/**
 5912  *	ata_port_detach - Detach ATA port in preparation of device removal
5913 * @ap: ATA port to be detached
5914 *
5915 * Detach all ATA devices and the associated SCSI devices of @ap;
5916 * then, remove the associated SCSI host. @ap is guaranteed to
5917 * be quiescent on return from this function.
5918 *
5919 * LOCKING:
5920 * Kernel thread context (may sleep).
5921 */
5922void ata_port_detach(struct ata_port *ap)
5923{
5924 unsigned long flags;
5925 int i;
5926
5927 if (!ap->ops->error_handler)
c3cf30a9 5928 goto skip_eh;
720ba126
TH
5929
5930 /* tell EH we're leaving & flush EH */
ba6a1308 5931 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 5932 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 5933 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5934
5935 ata_port_wait_eh(ap);
5936
5937 /* EH is now guaranteed to see UNLOADING, so no new device
5938 * will be attached. Disable all existing devices.
5939 */
ba6a1308 5940 spin_lock_irqsave(ap->lock, flags);
720ba126
TH
5941
5942 for (i = 0; i < ATA_MAX_DEVICES; i++)
5943 ata_dev_disable(&ap->device[i]);
5944
ba6a1308 5945 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5946
5947 /* Final freeze & EH. All in-flight commands are aborted. EH
 5948 	 * will be skipped and retries will be terminated with bad
5949 * target.
5950 */
ba6a1308 5951 spin_lock_irqsave(ap->lock, flags);
720ba126 5952 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 5953 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5954
5955 ata_port_wait_eh(ap);
5956
5957 /* Flush hotplug task. The sequence is similar to
5958 * ata_port_flush_task().
5959 */
5960 flush_workqueue(ata_aux_wq);
5961 cancel_delayed_work(&ap->hotplug_task);
5962 flush_workqueue(ata_aux_wq);
5963
c3cf30a9 5964 skip_eh:
720ba126 5965 /* remove the associated SCSI host */
cca3974e 5966 scsi_remove_host(ap->scsi_host);
720ba126
TH
5967}
5968
0529c159
TH
5969/**
5970 * ata_host_detach - Detach all ports of an ATA host
5971 * @host: Host to detach
5972 *
5973 * Detach all ports of @host.
5974 *
5975 * LOCKING:
5976 * Kernel thread context (may sleep).
5977 */
5978void ata_host_detach(struct ata_host *host)
5979{
5980 int i;
5981
5982 for (i = 0; i < host->n_ports; i++)
5983 ata_port_detach(host->ports[i]);
5984}
5985
f6d950e2
BK
5986struct ata_probe_ent *
5987ata_probe_ent_alloc(struct device *dev, const struct ata_port_info *port)
5988{
5989 struct ata_probe_ent *probe_ent;
5990
4d05447e 5991 probe_ent = devm_kzalloc(dev, sizeof(*probe_ent), GFP_KERNEL);
f6d950e2
BK
5992 if (!probe_ent) {
5993 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
5994 kobject_name(&(dev->kobj)));
5995 return NULL;
5996 }
5997
5998 INIT_LIST_HEAD(&probe_ent->node);
5999 probe_ent->dev = dev;
6000
6001 probe_ent->sht = port->sht;
cca3974e 6002 probe_ent->port_flags = port->flags;
f6d950e2
BK
6003 probe_ent->pio_mask = port->pio_mask;
6004 probe_ent->mwdma_mask = port->mwdma_mask;
6005 probe_ent->udma_mask = port->udma_mask;
6006 probe_ent->port_ops = port->port_ops;
d639ca94 6007 probe_ent->private_data = port->private_data;
f6d950e2
BK
6008
6009 return probe_ent;
6010}
6011
1da177e4
LT
6012/**
6013 * ata_std_ports - initialize ioaddr with standard port offsets.
6014 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6015 *
6016 * Utility function which initializes data_addr, error_addr,
6017 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6018 * device_addr, status_addr, and command_addr to standard offsets
6019 * relative to cmd_addr.
6020 *
6021 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6022 */
0baab86b 6023
1da177e4
LT
6024void ata_std_ports(struct ata_ioports *ioaddr)
6025{
6026 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6027 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6028 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6029 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6030 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6031 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6032 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6033 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6034 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6035 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6036}
6037
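/* Minimal usage sketch ("base" and the 0x10 offset are assumptions): only
 * cmd_addr must be valid on entry; ctl_addr, altstatus_addr, bmdma_addr
 * and scr_addr remain the caller's responsibility.
 */
static void foo_setup_ioaddr(struct ata_port *ap, void __iomem *base)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	ioaddr->cmd_addr = base;
	ioaddr->altstatus_addr =
	ioaddr->ctl_addr = base + 0x10;	/* device-specific control offset */
	ata_std_ports(ioaddr);		/* derives the taskfile registers */
}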
0baab86b 6038
374b1873
JG
6039#ifdef CONFIG_PCI
6040
1da177e4
LT
6041/**
6042 * ata_pci_remove_one - PCI layer callback for device removal
6043 * @pdev: PCI device that was removed
6044 *
b878ca5d
TH
6045 * PCI layer indicates to libata via this hook that hot-unplug or
6046 * module unload event has occurred. Detach all ports. Resource
6047 * release is handled via devres.
1da177e4
LT
6048 *
6049 * LOCKING:
6050 * Inherited from PCI layer (may sleep).
6051 */
f0d36efd 6052void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4
LT
6053{
6054 struct device *dev = pci_dev_to_dev(pdev);
cca3974e 6055 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 6056
b878ca5d 6057 ata_host_detach(host);
1da177e4
LT
6058}
6059
6060/* move to PCI subsystem */
057ace5e 6061int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6062{
6063 unsigned long tmp = 0;
6064
6065 switch (bits->width) {
6066 case 1: {
6067 u8 tmp8 = 0;
6068 pci_read_config_byte(pdev, bits->reg, &tmp8);
6069 tmp = tmp8;
6070 break;
6071 }
6072 case 2: {
6073 u16 tmp16 = 0;
6074 pci_read_config_word(pdev, bits->reg, &tmp16);
6075 tmp = tmp16;
6076 break;
6077 }
6078 case 4: {
6079 u32 tmp32 = 0;
6080 pci_read_config_dword(pdev, bits->reg, &tmp32);
6081 tmp = tmp32;
6082 break;
6083 }
6084
6085 default:
6086 return -EINVAL;
6087 }
6088
6089 tmp &= bits->mask;
6090
6091 return (tmp == bits->val) ? 1 : 0;
6092}
9b847548 6093
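/* Usage sketch in the style of ata_piix ("foo" names and register values
 * are illustrative, not from a real datasheet): test a config-space enable
 * bit before probing a channel.  pci_bits is { reg, width, mask, val }.
 */
static const struct pci_bits foo_enable_bits[] = {
	{ 0x41U, 1U, 0x80UL, 0x80UL },	/* port 0: reg 0x41, 1 byte wide */
	{ 0x43U, 1U, 0x80UL, 0x80UL },	/* port 1 */
};

static int foo_prereset(struct ata_port *ap)
{
	struct pci_dev *pdev = to_pci_dev(ap->host->dev);

	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
		return -ENOENT;		/* channel disabled; skip probe */
	return ata_std_prereset(ap);
}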
3c5100c1 6094void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6095{
6096 pci_save_state(pdev);
4c90d971 6097 pci_disable_device(pdev);
500530f6 6098
4c90d971 6099 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6100 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6101}
6102
553c4aa6 6103int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6104{
553c4aa6
TH
6105 int rc;
6106
9b847548
JA
6107 pci_set_power_state(pdev, PCI_D0);
6108 pci_restore_state(pdev);
553c4aa6 6109
b878ca5d 6110 rc = pcim_enable_device(pdev);
553c4aa6
TH
6111 if (rc) {
6112 dev_printk(KERN_ERR, &pdev->dev,
6113 "failed to enable device after resume (%d)\n", rc);
6114 return rc;
6115 }
6116
9b847548 6117 pci_set_master(pdev);
553c4aa6 6118 return 0;
500530f6
TH
6119}
6120
3c5100c1 6121int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6122{
cca3974e 6123 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6124 int rc = 0;
6125
cca3974e 6126 rc = ata_host_suspend(host, mesg);
500530f6
TH
6127 if (rc)
6128 return rc;
6129
3c5100c1 6130 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6131
6132 return 0;
6133}
6134
6135int ata_pci_device_resume(struct pci_dev *pdev)
6136{
cca3974e 6137 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6138 int rc;
500530f6 6139
553c4aa6
TH
6140 rc = ata_pci_device_do_resume(pdev);
6141 if (rc == 0)
6142 ata_host_resume(host);
6143 return rc;
9b847548 6144}
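/* Typical wiring (sketch; "foo" names are hypothetical): most PCI LLDs
 * point their pci_driver hooks straight at the helpers above.
 */
static struct pci_driver foo_pci_driver = {
	.name		= "foo",
	.id_table	= foo_pci_ids,		/* hypothetical ID table */
	.probe		= foo_init_one,		/* hypothetical probe */
	.remove		= ata_pci_remove_one,
	.suspend	= ata_pci_device_suspend,
	.resume		= ata_pci_device_resume,
};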
1da177e4
LT
6145#endif /* CONFIG_PCI */
6146
6147
1da177e4
LT
6148static int __init ata_init(void)
6149{
a8601e5f 6150 ata_probe_timeout *= HZ;
1da177e4
LT
6151 ata_wq = create_workqueue("ata");
6152 if (!ata_wq)
6153 return -ENOMEM;
6154
453b07ac
TH
6155 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6156 if (!ata_aux_wq) {
6157 destroy_workqueue(ata_wq);
6158 return -ENOMEM;
6159 }
6160
1da177e4
LT
6161 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6162 return 0;
6163}
6164
6165static void __exit ata_exit(void)
6166{
6167 destroy_workqueue(ata_wq);
453b07ac 6168 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6169}
6170
a4625085 6171subsys_initcall(ata_init);
1da177e4
LT
6172module_exit(ata_exit);
6173
67846b30 6174static unsigned long ratelimit_time;
34af946a 6175static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6176
6177int ata_ratelimit(void)
6178{
6179 int rc;
6180 unsigned long flags;
6181
6182 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6183
6184 if (time_after(jiffies, ratelimit_time)) {
6185 rc = 1;
6186 ratelimit_time = jiffies + (HZ/5);
6187 } else
6188 rc = 0;
6189
6190 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6191
6192 return rc;
6193}
6194
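/* Usage sketch ("foo" name is hypothetical): gating warnings on
 * ata_ratelimit() caps a message storm at roughly five lines per second
 * (one per HZ/5 window).
 */
static void foo_warn_spurious(struct ata_port *ap, u8 status)
{
	if (ata_ratelimit())
		ata_port_printk(ap, KERN_WARNING,
				"spurious interrupt, status 0x%x\n", status);
}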
c22daff4
TH
6195/**
6196 * ata_wait_register - wait until register value changes
6197 * @reg: IO-mapped register
6198 * @mask: Mask to apply to read register value
6199 * @val: Wait condition
6200 * @interval_msec: polling interval in milliseconds
6201 * @timeout_msec: timeout in milliseconds
6202 *
6203 * Waiting for some bits of register to change is a common
6204 * operation for ATA controllers. This function reads 32bit LE
6205 * IO-mapped register @reg and tests for the following condition.
6206 *
6207 * (*@reg & mask) != val
6208 *
6209 * If the condition is met, it returns; otherwise, the process is
6210 * repeated after @interval_msec until timeout.
6211 *
6212 * LOCKING:
6213 * Kernel thread context (may sleep)
6214 *
6215 * RETURNS:
6216 * The final register value.
6217 */
6218u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6219 unsigned long interval_msec,
6220 unsigned long timeout_msec)
6221{
6222 unsigned long timeout;
6223 u32 tmp;
6224
6225 tmp = ioread32(reg);
6226
6227 /* Calculate timeout _after_ the first read to make sure
6228 * preceding writes reach the controller before starting to
6229 * eat away the timeout.
6230 */
6231 timeout = jiffies + (timeout_msec * HZ) / 1000;
6232
6233 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6234 msleep(interval_msec);
6235 tmp = ioread32(reg);
6236 }
6237
6238 return tmp;
6239}
6240
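/* Usage sketch (the FOO_* register offset and bit are made up): poll every
 * 10ms, for at most 500ms, until a busy bit clears.  The last value read
 * is returned, so a timeout shows up as the bit still being set.
 */
#define FOO_CTL_STAT	0x20		/* hypothetical register offset */
#define FOO_CTL_BUSY	(1 << 0)	/* hypothetical busy bit */

static int foo_wait_idle(void __iomem *mmio)
{
	u32 tmp;

	tmp = ata_wait_register(mmio + FOO_CTL_STAT,
				FOO_CTL_BUSY, FOO_CTL_BUSY, 10, 500);
	if (tmp & FOO_CTL_BUSY)
		return -EBUSY;		/* controller never went idle */
	return 0;
}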
dd5b06c4
TH
6241/*
6242 * Dummy port_ops
6243 */
6244static void ata_dummy_noret(struct ata_port *ap) { }
6245static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6246static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6247
6248static u8 ata_dummy_check_status(struct ata_port *ap)
6249{
6250 return ATA_DRDY;
6251}
6252
6253static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6254{
6255 return AC_ERR_SYSTEM;
6256}
6257
6258const struct ata_port_operations ata_dummy_port_ops = {
6259 .port_disable = ata_port_disable,
6260 .check_status = ata_dummy_check_status,
6261 .check_altstatus = ata_dummy_check_status,
6262 .dev_select = ata_noop_dev_select,
6263 .qc_prep = ata_noop_qc_prep,
6264 .qc_issue = ata_dummy_qc_issue,
6265 .freeze = ata_dummy_noret,
6266 .thaw = ata_dummy_noret,
6267 .error_handler = ata_dummy_noret,
6268 .post_internal_cmd = ata_dummy_qc_noret,
6269 .irq_clear = ata_dummy_noret,
6270 .port_start = ata_dummy_ret0,
6271 .port_stop = ata_dummy_noret,
6272};
6273
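/* Usage sketch: a driver marks dead or unwired channels in its probe_ent,
 * and ata_device_add() above swaps in ata_dummy_port_ops for them so the
 * core can treat every port slot uniformly, e.g.:
 *
 *	if (second_channel_absent)		(hypothetical condition)
 *		probe_ent->dummy_port_mask |= 1 << 1;
 */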
1da177e4
LT
6274/*
6275 * libata is essentially a library of internal helper functions for
6276 * low-level ATA host controller drivers. As such, the API/ABI is
6277 * likely to change as new drivers are added and updated.
6278 * Do not depend on ABI/API stability.
6279 */
6280
e9c83914
TH
6281EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6282EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6283EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 6284EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
1da177e4
LT
6285EXPORT_SYMBOL_GPL(ata_std_bios_param);
6286EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 6287EXPORT_SYMBOL_GPL(ata_host_init);
1da177e4 6288EXPORT_SYMBOL_GPL(ata_device_add);
0529c159 6289EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
6290EXPORT_SYMBOL_GPL(ata_sg_init);
6291EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 6292EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 6293EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6294EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 6295EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
6296EXPORT_SYMBOL_GPL(ata_tf_load);
6297EXPORT_SYMBOL_GPL(ata_tf_read);
6298EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6299EXPORT_SYMBOL_GPL(ata_std_dev_select);
6300EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6301EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6302EXPORT_SYMBOL_GPL(ata_check_status);
6303EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
6304EXPORT_SYMBOL_GPL(ata_exec_command);
6305EXPORT_SYMBOL_GPL(ata_port_start);
1da177e4 6306EXPORT_SYMBOL_GPL(ata_interrupt);
0d5ff566
TH
6307EXPORT_SYMBOL_GPL(ata_data_xfer);
6308EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
1da177e4 6309EXPORT_SYMBOL_GPL(ata_qc_prep);
e46834cd 6310EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
6311EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6312EXPORT_SYMBOL_GPL(ata_bmdma_start);
6313EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6314EXPORT_SYMBOL_GPL(ata_bmdma_status);
6315EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
6316EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6317EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6318EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6319EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6320EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 6321EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6322EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6323EXPORT_SYMBOL_GPL(sata_set_spd);
d7bb4cc7
TH
6324EXPORT_SYMBOL_GPL(sata_phy_debounce);
6325EXPORT_SYMBOL_GPL(sata_phy_resume);
1da177e4
LT
6326EXPORT_SYMBOL_GPL(sata_phy_reset);
6327EXPORT_SYMBOL_GPL(__sata_phy_reset);
6328EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 6329EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 6330EXPORT_SYMBOL_GPL(ata_std_softreset);
b6103f6d 6331EXPORT_SYMBOL_GPL(sata_port_hardreset);
c2bd5804
TH
6332EXPORT_SYMBOL_GPL(sata_std_hardreset);
6333EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6334EXPORT_SYMBOL_GPL(ata_dev_classify);
6335EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6336EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6337EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6338EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 6339EXPORT_SYMBOL_GPL(ata_busy_sleep);
86e45b6b 6340EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
6341EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6342EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6343EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6344EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6345EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 6346EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
6347EXPORT_SYMBOL_GPL(sata_scr_valid);
6348EXPORT_SYMBOL_GPL(sata_scr_read);
6349EXPORT_SYMBOL_GPL(sata_scr_write);
6350EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6351EXPORT_SYMBOL_GPL(ata_port_online);
6352EXPORT_SYMBOL_GPL(ata_port_offline);
cca3974e
JG
6353EXPORT_SYMBOL_GPL(ata_host_suspend);
6354EXPORT_SYMBOL_GPL(ata_host_resume);
6a62a04d
TH
6355EXPORT_SYMBOL_GPL(ata_id_string);
6356EXPORT_SYMBOL_GPL(ata_id_c_string);
10305f0f 6357EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6919a0a6 6358EXPORT_SYMBOL_GPL(ata_device_blacklisted);
1da177e4
LT
6359EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6360
1bc4ccff 6361EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
452503f9
AC
6362EXPORT_SYMBOL_GPL(ata_timing_compute);
6363EXPORT_SYMBOL_GPL(ata_timing_merge);
6364
1da177e4
LT
6365#ifdef CONFIG_PCI
6366EXPORT_SYMBOL_GPL(pci_test_config_bits);
6367EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
6368EXPORT_SYMBOL_GPL(ata_pci_init_one);
6369EXPORT_SYMBOL_GPL(ata_pci_remove_one);
500530f6
TH
6370EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6371EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6372EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6373EXPORT_SYMBOL_GPL(ata_pci_device_resume);
67951ade
AC
6374EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6375EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 6376#endif /* CONFIG_PCI */
9b847548 6377
9b847548
JA
6378EXPORT_SYMBOL_GPL(ata_scsi_device_suspend);
6379EXPORT_SYMBOL_GPL(ata_scsi_device_resume);
ece1d636 6380
ece1d636 6381EXPORT_SYMBOL_GPL(ata_eng_timeout);
7b70fc03
TH
6382EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6383EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499
TH
6384EXPORT_SYMBOL_GPL(ata_port_freeze);
6385EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6386EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6387EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6388EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 6389EXPORT_SYMBOL_GPL(ata_do_eh);
83625006
AI
6390EXPORT_SYMBOL_GPL(ata_irq_on);
6391EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6392EXPORT_SYMBOL_GPL(ata_irq_ack);
6393EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
a619f981 6394EXPORT_SYMBOL_GPL(ata_dev_try_classify);