/*
 * libata-core.c - helper library for ATA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2004 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION	"2.20"	/* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]	= {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[]	= {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]	= { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

int ata_spindown_compat = 1;
module_param_named(spindown_compat, ata_spindown_compat, int, 0644);
MODULE_PARM_DESC(spindown_compat, "Enable backward compatible spindown "
		 "behavior.  Will be removed.  More info can be found in "
		 "Documentation/feature-removal-schedule.txt\n");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;	/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
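
/*
 * Illustrative sketch (not part of the driver): the two helpers above
 * are inverses for the fields they share, so building a taskfile,
 * serializing it into a Register - Host to Device FIS and parsing the
 * buffer back recovers the LBA and count fields.  Values are arbitrary.
 */
#if 0
static void example_fis_round_trip(void)
{
	struct ata_taskfile tf, res;
	u8 fis[20];

	memset(&tf, 0, sizeof(tf));
	tf.command = ATA_CMD_READ_EXT;
	tf.lbal = 0x01;
	tf.lbam = 0x02;
	tf.lbah = 0x03;
	tf.nsect = 8;

	ata_tf_to_fis(&tf, fis, 0);	/* pmp 0, fis[0] == 0x27 */
	ata_tf_from_fis(fis, &res);	/* res.lbal == 0x01, res.nsect == 8 */
}
#endif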

static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
	u8 cmd;

	int index, fua, lba48, write;

	fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
	lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
	write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

	if (dev->flags & ATA_DFLAG_PIO) {
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
		/* Unable to use DMA due to host limitation */
		tf->protocol = ATA_PROT_PIO;
		index = dev->multi_count ? 0 : 8;
	} else {
		tf->protocol = ATA_PROT_DMA;
		index = 16;
	}

	cmd = ata_rw_cmds[index + fua + lba48 + write];
	if (cmd) {
		tf->command = cmd;
		return 0;
	}
	return -1;
}
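
/*
 * Illustrative sketch (not part of the driver): ata_rw_cmds is indexed
 * by base (0 = PIO multi, 8 = PIO, 16 = DMA) + fua (4) + lba48 (2) +
 * write (1).  Zero entries mark combinations with no opcode (e.g. FUA
 * without LBA48), which makes ata_rwcmd_protocol() return -1.
 */
#if 0
static u8 example_rw_cmd(void)
{
	/* DMA + FUA + LBA48 + write: index 16 + 4 + 2 + 1 == 23 */
	return ata_rw_cmds[16 + 4 + 2 + 1];	/* == ATA_CMD_WRITE_FUA_EXT */
}
#endif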

/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
	u64 block = 0;

	if (tf->flags & ATA_TFLAG_LBA) {
		if (tf->flags & ATA_TFLAG_LBA48) {
			block |= (u64)tf->hob_lbah << 40;
			block |= (u64)tf->hob_lbam << 32;
			block |= tf->hob_lbal << 24;
		} else
			block |= (tf->device & 0xf) << 24;

		block |= tf->lbah << 16;
		block |= tf->lbam << 8;
		block |= tf->lbal;
	} else {
		u32 cyl, head, sect;

		cyl = tf->lbam | (tf->lbah << 8);
		head = tf->device & 0xf;
		sect = tf->lbal;

		block = (cyl * dev->heads + head) * dev->sectors + sect;
	}

	return block;
}
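
/*
 * Illustrative worked example (not part of the driver): for a CHS
 * taskfile the helper above computes
 * (cyl * heads + head) * sectors + sect, so a device with 16 heads
 * and 63 sectors per track, addressing C/H/S 2/1/1, yields
 * (2 * 16 + 1) * 63 + 1 == 2080.
 */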

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255 */
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
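
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * preparing a 16-sector FUA write at LBA 0x12345678.  With NCQ enabled
 * and a regular queue tag, the helper selects ATA_CMD_FPDMA_WRITE and
 * encodes the tag in nsect; otherwise it falls back to LBA28, LBA48 or
 * CHS as the device allows.
 */
#if 0
static int example_build_fua_write(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int tag = 0;	/* hypothetical queue tag */

	ata_tf_init(dev, &tf);
	return ata_build_rw_tf(&tf, dev, 0x12345678ULL, 16,
			       ATA_TFLAG_WRITE | ATA_TFLAG_FUA, tag);
	/* 0, or -ERANGE/-EINVAL if the request cannot be represented */
}
#endif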

/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
				      unsigned int mwdma_mask,
				      unsigned int udma_mask)
{
	return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
		((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
		((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
				unsigned int *pio_mask,
				unsigned int *mwdma_mask,
				unsigned int *udma_mask)
{
	if (pio_mask)
		*pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
	if (mwdma_mask)
		*mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
	if (udma_mask)
		*udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}
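
/*
 * Illustrative sketch (not part of the driver): packing and unpacking
 * are inverses as long as each mask fits its field width.
 */
#if 0
static void example_xfermask_round_trip(void)
{
	unsigned int xfer_mask, pio, mwdma, udma;

	xfer_mask = ata_pack_xfermask(0x1f /* PIO0-4 */,
				      0x07 /* MWDMA0-2 */,
				      0x3f /* UDMA0-5 */);
	ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
	/* pio == 0x1f, mwdma == 0x07 and udma == 0x3f again */
}
#endif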

static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
	int highbit = fls(xfer_mask) - 1;
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
			return ent->base + highbit - ent->shift;
	return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return 1 << (ent->shift + xfer_mode - ent->base);
	return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
	const struct ata_xfer_ent *ent;

	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
			return ent->shift;
	return -1;
}
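
/*
 * Illustrative sketch (not part of the driver): only the highest set
 * bit matters to ata_xfer_mask2mode(), so a mask covering UDMA0-5 maps
 * to XFER_UDMA_5, and converting back yields just that single bit.
 */
#if 0
static void example_mask_mode_conversion(void)
{
	u8 mode = ata_xfer_mask2mode(0x3f << ATA_SHIFT_UDMA);	/* XFER_UDMA_5 */
	unsigned int mask = ata_xfer_mode2mask(mode);	/* 1 << (ATA_SHIFT_UDMA + 5) */
}
#endif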

/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit;

	highbit = fls(xfer_mask) - 1;
	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
		ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
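
/*
 * Illustrative sketch (not part of the driver): after reset an ATAPI
 * device leaves the 0x14/0xeb signature in the LBA mid/high shadow
 * registers, so a taskfile read back from the port classifies as ATAPI.
 */
#if 0
static unsigned int example_classify_atapi(void)
{
	struct ata_taskfile tf = { .lbam = 0x14, .lbah = 0xeb };

	return ata_dev_classify(&tf);	/* == ATA_DEV_ATAPI */
}
#endif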

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
		   unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
		     unsigned int ofs, unsigned int len)
{
	unsigned char *p;

	WARN_ON(!(len & 1));

	ata_id_string(id, s, ofs, len - 1);

	p = s + strnlen(s, len - 1);
	while (p > s && p[-1] == ' ')
		p--;
	*p = '\0';
}
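
/*
 * Illustrative sketch (not part of the driver): pulling the product
 * string out of raw IDENTIFY data.  The buffer needs one extra byte
 * for the terminating NUL, hence the odd @len.
 */
#if 0
static void example_model_string(const u16 *id)
{
	unsigned char model[ATA_ID_PROD_LEN + 1];

	ata_id_c_string(id, model, ATA_ID_PROD, sizeof(model));
	/* model now holds e.g. "ST3160812AS" with trailing blanks trimmed */
}
#endif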

static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
	sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
	sectors |= (tf->hob_lbal & 0xff) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
	u64 sectors = 0;

	sectors |= (tf->device & 0x0f) << 24;
	sectors |= (tf->lbah & 0xff) << 16;
	sectors |= (tf->lbam & 0xff) << 8;
	sectors |= (tf->lbal & 0xff);

	return ++sectors;
}

/**
 * ata_read_native_max_address_ext - LBA48 native max query
 * @dev: Device to query
 *
 * Perform an LBA48 size query upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */

static u64 ata_read_native_max_address_ext(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}

/**
 * ata_read_native_max_address - LBA28 native max query
 * @dev: Device to query
 *
 * Perform an LBA28 size query upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */

static u64 ata_read_native_max_address(struct ata_device *dev)
{
	unsigned int err;
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_READ_NATIVE_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}

/**
 * ata_set_native_max_address_ext - LBA48 native max set
 * @dev: Device to query
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA48 size set max upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */

static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX_EXT;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= 0x40;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	tf.hob_lbal = (new_sectors >> 24) & 0xff;
	tf.hob_lbam = (new_sectors >> 32) & 0xff;
	tf.hob_lbah = (new_sectors >> 40) & 0xff;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba48(&tf);
}

/**
 * ata_set_native_max_address - LBA28 native max set
 * @dev: Device to query
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA28 size set max upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */

static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err;
	struct ata_taskfile tf;

	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.command = ATA_CMD_SET_MAX;
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
	tf.protocol |= ATA_PROT_NODATA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;
	tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;

	err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	if (err)
		return 0;

	return ata_tf_to_lba(&tf);
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 */

static u64 ata_hpa_resize(struct ata_device *dev)
{
	u64 sectors = dev->n_sectors;
	u64 hpa_sectors;

	if (ata_id_has_lba48(dev->id))
		hpa_sectors = ata_read_native_max_address_ext(dev);
	else
		hpa_sectors = ata_read_native_max_address(dev);

	/* if no hpa, both should be equal */
	ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, "
		       "hpa_sectors = %lld\n",
		       __FUNCTION__, (long long)sectors, (long long)hpa_sectors);

	if (hpa_sectors > sectors) {
		ata_dev_printk(dev, KERN_INFO,
			"Host Protected Area detected:\n"
			"\tcurrent size: %lld sectors\n"
			"\tnative size: %lld sectors\n",
			(long long)sectors, (long long)hpa_sectors);

		if (ata_ignore_hpa) {
			if (ata_id_has_lba48(dev->id))
				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
			else
				hpa_sectors = ata_set_native_max_address(dev,
								hpa_sectors);

			if (hpa_sectors) {
				ata_dev_printk(dev, KERN_INFO, "native size "
					"increased to %lld sectors\n",
					(long long)hpa_sectors);
				return hpa_sectors;
			}
		}
	}
	return sectors;
}

static u64 ata_id_n_sectors(const u16 *id)
{
	if (ata_id_has_lba(id)) {
		if (ata_id_has_lba48(id))
			return ata_id_u64(id, 100);
		else
			return ata_id_u32(id, 60);
	} else {
		if (ata_id_current_chs_valid(id))
			return ata_id_u32(id, 57);
		else
			return id[1] * id[3] * id[6];
	}
}
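
/*
 * Illustrative sketch (not part of the driver): capacity extraction
 * above reduces to picking the right IDENTIFY field - the 64-bit count
 * at words 100-103 for LBA48, the 32-bit count at words 60-61 for
 * plain LBA, and the current or default C*H*S product otherwise.
 */
#if 0
static u64 example_capacity(struct ata_device *dev)
{
	return ata_id_n_sectors(dev->id); /* e.g. 312581808 for a 160 GB disk */
}
#endif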

/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode. This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
	unsigned int mask;
	u8 mode;

	/* Pack the DMA modes */
	mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
	if (dev->id[53] & 0x04)
		mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

	/* Select the mode in use */
	mode = ata_xfer_mask2mode(mask);

	if (mode != 0) {
		ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
			       ata_mode_string(mask));
	} else {
		/* SWDMA perhaps ? */
		mode = unknown;
		ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
	}

	/* Configure the device reporting */
	dev->xfer_mode = mode;
	dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	iowrite8(tmp, ap->ioaddr.device_addr);
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
	unsigned int pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However it's the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user(low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	int rc;

	if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
		return;

	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

	/* rc == 0 means that another user is using port task */
	WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	unsigned long flags;

	DPRINTK("ENTER\n");

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	DPRINTK("flush #1\n");
	cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */

	/*
	 * At this point, if a task is running, it's guaranteed to see
	 * the FLUSH flag; thus, it will never queue pio tasks again.
	 * Cancel and flush.
	 */
	if (!cancel_delayed_work(&ap->port_task)) {
		if (ata_msg_ctl(ap))
			ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
					__FUNCTION__);
		cancel_work_sync(&ap->port_task.work);
	}

	spin_lock_irqsave(ap->lock, flags);
	ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
	spin_unlock_irqrestore(ap->lock, flags);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
	struct completion *waiting = qc->private_data;

	complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_port *ap = dev->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	preempted_tag = ap->active_tag;
	preempted_sactive = ap->sactive;
	preempted_qc_active = ap->qc_active;
	ap->active_tag = ATA_TAG_POISON;
	ap->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	ap->active_tag = preempted_tag;
	ap->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
			   struct ata_taskfile *tf, const u8 *cdb,
			   int dma_dir, void *buf, unsigned int buflen)
{
	struct scatterlist *psg = NULL, sg;
	unsigned int n_elem = 0;

	if (dma_dir != DMA_NONE) {
		WARN_ON(!buf);
		sg_init_one(&sg, buf, buflen);
		psg = &sg;
		n_elem++;
	}

	return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command, that only consists of the opcode
 * 'cmd' itself, without filling any other registers
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
	struct ata_taskfile tf;

	ata_tf_init(dev, &tf);

	tf.command = cmd;
	tf.flags |= ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;

	return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}
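
/*
 * Illustrative sketch (not part of the driver): a hypothetical caller
 * flushing the device write cache through the helper above.  Only the
 * AC_ERR_* mask comes back; callers that need the result taskfile use
 * ata_exec_internal() directly.
 */
#if 0
static void example_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask = ata_do_simple_cmd(dev, ATA_CMD_FLUSH);

	if (err_mask)
		ata_dev_printk(dev, KERN_WARNING,
			       "FLUSH CACHE failed (err_mask=0x%x)\n", err_mask);
}
#endif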

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
	/* Controller doesn't support IORDY. Probably a pointless check
	   as the caller should know this */
	if (adev->ap->flags & ATA_FLAG_NO_IORDY)
		return 0;
	/* PIO3 and higher it is mandatory */
	if (adev->pio_mode > XFER_PIO_2)
		return 1;
	/* We turn it on when possible */
	if (ata_id_has_iordy(adev->id))
		return 1;
	return 0;
}

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the highest mode possible if we are not using iordy. Return
 * -1 if no iordy mode is available.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}

/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check */
	rc = -EINVAL;
	reason = "device reports illegal type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY
		 * INITIALIZE DEVICE PARAMETERS
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
	return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->ap;
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
1837
49016aca 1838/**
ffeae418 1839 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1840 * @dev: Target device to configure
1841 *
1842 * Configure @dev according to @dev->id. Generic and low-level
1843 * driver specific fixups are also applied.
49016aca
TH
1844 *
1845 * LOCKING:
ffeae418
TH
1846 * Kernel thread context (may sleep)
1847 *
1848 * RETURNS:
1849 * 0 on success, -errno otherwise
49016aca 1850 */
efdaedc4 1851int ata_dev_configure(struct ata_device *dev)
49016aca 1852{
3373efd8 1853 struct ata_port *ap = dev->ap;
efdaedc4 1854 int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1855 const u16 *id = dev->id;
ff8854b2 1856 unsigned int xfer_mask;
b352e57d 1857 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1858 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1859 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1860 int rc;
49016aca 1861
0dd4b21f 1862 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1863 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1864 __FUNCTION__);
ffeae418 1865 return 0;
49016aca
TH
1866 }
1867
0dd4b21f 1868 if (ata_msg_probe(ap))
44877b4e 1869 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1870
08573a86 1871 /* set _SDD */
3a32a8e9 1872 rc = ata_acpi_push_id(dev);
08573a86
KCA
1873 if (rc) {
1874 ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
1875 rc);
1876 }
1877
1878 /* retrieve and execute the ATA task file of _GTF */
1879 ata_acpi_exec_tfs(ap);
1880
c39f5ebe 1881 /* print device capabilities */
0dd4b21f 1882 if (ata_msg_probe(ap))
88574551
TH
1883 ata_dev_printk(dev, KERN_DEBUG,
1884 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1885 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1886 __FUNCTION__,
f15a1daf
TH
1887 id[49], id[82], id[83], id[84],
1888 id[85], id[86], id[87], id[88]);
c39f5ebe 1889
208a9933 1890 /* initialize to-be-configured parameters */
ea1dd4e1 1891 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1892 dev->max_sectors = 0;
1893 dev->cdb_len = 0;
1894 dev->n_sectors = 0;
1895 dev->cylinders = 0;
1896 dev->heads = 0;
1897 dev->sectors = 0;
1898
1da177e4
LT
1899 /*
1900 * common ATA, ATAPI feature tests
1901 */
1902
ff8854b2 1903 /* find max transfer mode; for printk only */
1148c3a7 1904 xfer_mask = ata_id_xfermask(id);
1da177e4 1905
0dd4b21f
BP
1906 if (ata_msg_probe(ap))
1907 ata_dump_id(id);
1da177e4
LT
1908
1909 /* ATA-specific feature tests */
1910 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1911 if (ata_id_is_cfa(id)) {
1912 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1913 ata_dev_printk(dev, KERN_WARNING,
1914 "supports DRM functions and may "
1915 "not be fully accessable.\n");
b352e57d
AC
1916 snprintf(revbuf, 7, "CFA");
1917 }
1918 else
1919 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1920
1148c3a7 1921 dev->n_sectors = ata_id_n_sectors(id);
1e999736 1922 dev->n_sectors_boot = dev->n_sectors;
2940740b 1923
3f64f565 1924 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
591a6e8e 1925 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
3f64f565
EM
1926 sizeof(fwrevbuf));
1927
591a6e8e 1928 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
3f64f565
EM
1929 sizeof(modelbuf));
1930
1931 if (dev->id[59] & 0x100)
1932 dev->multi_count = dev->id[59] & 0xff;
1933
1148c3a7 1934 if (ata_id_has_lba(id)) {
4c2d721a 1935 const char *lba_desc;
a6e6ce8e 1936 char ncq_desc[20];
8bf62ece 1937
4c2d721a
TH
1938 lba_desc = "LBA";
1939 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1940 if (ata_id_has_lba48(id)) {
8bf62ece 1941 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1942 lba_desc = "LBA48";
6fc49adb
TH
1943
1944 if (dev->n_sectors >= (1UL << 28) &&
1945 ata_id_has_flush_ext(id))
1946 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1947 }
8bf62ece 1948
1e999736
AC
1949 if (ata_id_hpa_enabled(dev->id))
1950 dev->n_sectors = ata_hpa_resize(dev);
1951
a6e6ce8e
TH
1952 /* config NCQ */
1953 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1954
8bf62ece 1955 /* print device info to dmesg */
3f64f565
EM
1956 if (ata_msg_drv(ap) && print_info) {
1957 ata_dev_printk(dev, KERN_INFO,
1958 "%s: %s, %s, max %s\n",
1959 revbuf, modelbuf, fwrevbuf,
1960 ata_mode_string(xfer_mask));
1961 ata_dev_printk(dev, KERN_INFO,
1962 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1963 (unsigned long long)dev->n_sectors,
3f64f565
EM
1964 dev->multi_count, lba_desc, ncq_desc);
1965 }
ffeae418 1966 } else {
8bf62ece
AL
1967 /* CHS */
1968
1969 /* Default translation */
1148c3a7
TH
1970 dev->cylinders = id[1];
1971 dev->heads = id[3];
1972 dev->sectors = id[6];
8bf62ece 1973
1148c3a7 1974 if (ata_id_current_chs_valid(id)) {
8bf62ece 1975 /* Current CHS translation is valid. */
1148c3a7
TH
1976 dev->cylinders = id[54];
1977 dev->heads = id[55];
1978 dev->sectors = id[56];
8bf62ece
AL
1979 }
1980
1981 /* print device info to dmesg */
3f64f565 1982 if (ata_msg_drv(ap) && print_info) {
88574551 1983 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1984 "%s: %s, %s, max %s\n",
1985 revbuf, modelbuf, fwrevbuf,
1986 ata_mode_string(xfer_mask));
a84471fe 1987 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1988 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1989 (unsigned long long)dev->n_sectors,
1990 dev->multi_count, dev->cylinders,
1991 dev->heads, dev->sectors);
1992 }
07f6f7d0
AL
1993 }
1994
6e7846e9 1995 dev->cdb_len = 16;
1da177e4
LT
1996 }
1997
1998 /* ATAPI-specific feature tests */
2c13b7ce 1999 else if (dev->class == ATA_DEV_ATAPI) {
08a556db
AL
2000 char *cdb_intr_string = "";
2001
1148c3a7 2002 rc = atapi_cdb_len(id);
1da177e4 2003 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2004 if (ata_msg_warn(ap))
88574551
TH
2005 ata_dev_printk(dev, KERN_WARNING,
2006 "unsupported CDB len\n");
ffeae418 2007 rc = -EINVAL;
1da177e4
LT
2008 goto err_out_nosup;
2009 }
6e7846e9 2010 dev->cdb_len = (unsigned int) rc;
1da177e4 2011
08a556db 2012 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2013 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2014 cdb_intr_string = ", CDB intr";
2015 }
312f7da2 2016
1da177e4 2017 /* print device info to dmesg */
5afc8142 2018 if (ata_msg_drv(ap) && print_info)
12436c30
TH
2019 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
2020 ata_mode_string(xfer_mask),
2021 cdb_intr_string);
1da177e4
LT
2022 }
2023
914ed354
TH
2024 /* determine max_sectors */
2025 dev->max_sectors = ATA_MAX_SECTORS;
2026 if (dev->flags & ATA_DFLAG_LBA48)
2027 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2028
93590859
AC
2029 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2030 /* Let the user know. We don't want to disallow opens for
2031 rescue purposes, or in case the vendor is just a blithering
2032 idiot */
2033 if (print_info) {
2034 ata_dev_printk(dev, KERN_WARNING,
2035"Drive reports diagnostics failure. This may indicate a drive\n");
2036 ata_dev_printk(dev, KERN_WARNING,
2037"fault or invalid emulation. Contact drive vendor for information.\n");
2038 }
2039 }
2040
4b2f3ede 2041 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2042 if (ata_dev_knobble(dev)) {
5afc8142 2043 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2044 ata_dev_printk(dev, KERN_INFO,
2045 "applying bridge limits\n");
5a529139 2046 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2047 dev->max_sectors = ATA_MAX_SECTORS;
2048 }
2049
18d6e9d5 2050 if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2051 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2052 dev->max_sectors);
18d6e9d5 2053
6f23a31d
AL
2054 /* limit ATAPI DMA to R/W commands only */
2055 if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
2056 dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
2057
4b2f3ede 2058 if (ap->ops->dev_config)
cd0d3bbc 2059 ap->ops->dev_config(dev);
4b2f3ede 2060
0dd4b21f
BP
2061 if (ata_msg_probe(ap))
2062 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2063 __FUNCTION__, ata_chk_status(ap));
ffeae418 2064 return 0;
1da177e4
LT
2065
2066err_out_nosup:
0dd4b21f 2067 if (ata_msg_probe(ap))
88574551
TH
2068 ata_dev_printk(dev, KERN_DEBUG,
2069 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2070 return rc;
1da177e4
LT
2071}
2072
be0d18df 2073/**
2e41e8e6 2074 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2075 * @ap: port
2076 *
2e41e8e6 2077 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2078 * detection.
2079 */
2080
2081int ata_cable_40wire(struct ata_port *ap)
2082{
2083 return ATA_CBL_PATA40;
2084}
2085
2086/**
2e41e8e6 2087 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2088 * @ap: port
2089 *
2e41e8e6 2090 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2091 * detection.
2092 */
2093
2094int ata_cable_80wire(struct ata_port *ap)
2095{
2096 return ATA_CBL_PATA80;
2097}
2098
2099/**
2100 * ata_cable_unknown - return unknown PATA cable.
2101 * @ap: port
2102 *
2103 * Helper method for drivers which have no PATA cable detection.
2104 */
2105
2106int ata_cable_unknown(struct ata_port *ap)
2107{
2108 return ATA_CBL_PATA_UNK;
2109}
2110
2111/**
2112 * ata_cable_sata - return SATA cable type
2113 * @ap: port
2114 *
2115 * Helper method for drivers which have SATA cables
2116 */
2117
2118int ata_cable_sata(struct ata_port *ap)
2119{
2120 return ATA_CBL_SATA;
2121}
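/*
 * Illustrative sketch (hypothetical driver code, not part of this
 * file): a low-level driver with known cabling simply plugs one of
 * the helpers above into its port operations; ata_bus_probe() then
 * calls it through ap->ops->cable_detect after the identify sequence.
 *
 *	static const struct ata_port_operations my_pata_ops = {
 *		.cable_detect	= ata_cable_40wire,
 *		...
 *	};
 */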
2122
1da177e4
LT
2123/**
2124 * ata_bus_probe - Reset and probe ATA bus
2125 * @ap: Bus to probe
2126 *
0cba632b
JG
2127 * Master ATA bus probing function. Initiates a hardware-dependent
2128 * bus reset, then attempts to identify any devices found on
2129 * the bus.
2130 *
1da177e4 2131 * LOCKING:
0cba632b 2132 * PCI/etc. bus probe sem.
1da177e4
LT
2133 *
2134 * RETURNS:
96072e69 2135 * Zero on success, negative errno otherwise.
1da177e4
LT
2136 */
2137
80289167 2138int ata_bus_probe(struct ata_port *ap)
1da177e4 2139{
28ca5c57 2140 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2141 int tries[ATA_MAX_DEVICES];
4ae72a1e 2142 int i, rc;
e82cbdb9 2143 struct ata_device *dev;
1da177e4 2144
28ca5c57 2145 ata_port_probe(ap);
c19ba8af 2146
14d2bac1
TH
2147 for (i = 0; i < ATA_MAX_DEVICES; i++)
2148 tries[i] = ATA_PROBE_MAX_TRIES;
2149
2150 retry:
2044470c 2151 /* reset and determine device classes */
52783c5d 2152 ap->ops->phy_reset(ap);
2061a47a 2153
52783c5d
TH
2154 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2155 dev = &ap->device[i];
c19ba8af 2156
52783c5d
TH
2157 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2158 dev->class != ATA_DEV_UNKNOWN)
2159 classes[dev->devno] = dev->class;
2160 else
2161 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2162
52783c5d 2163 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2164 }
1da177e4 2165
52783c5d 2166 ata_port_probe(ap);
2044470c 2167
b6079ca4
AC
2168 /* after the reset the device state is PIO 0 and the controller
2169 state is undefined. Record the mode */
2170
2171 for (i = 0; i < ATA_MAX_DEVICES; i++)
2172 ap->device[i].pio_mode = XFER_PIO_0;
2173
f31f0cc2
JG
2174 /* read IDENTIFY page and configure devices. We have to do the identify
2175 specific sequence bass-ackwards so that PDIAG- is released by
2176 the slave device */
2177
2178 for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
e82cbdb9 2179 dev = &ap->device[i];
28ca5c57 2180
ec573755
TH
2181 if (tries[i])
2182 dev->class = classes[i];
ffeae418 2183
14d2bac1 2184 if (!ata_dev_enabled(dev))
ffeae418 2185 continue;
ffeae418 2186
bff04647
TH
2187 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2188 dev->id);
14d2bac1
TH
2189 if (rc)
2190 goto fail;
f31f0cc2
JG
2191 }
2192
be0d18df
AC
2193 /* Now ask for the cable type as PDIAG- should have been released */
2194 if (ap->ops->cable_detect)
2195 ap->cbl = ap->ops->cable_detect(ap);
2196
f31f0cc2
JG
2197 /* After the identify sequence we can now set up the devices. We do
2198 this in the normal order so that the user doesn't get confused */
2199
2200 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2201 dev = &ap->device[i];
2202 if (!ata_dev_enabled(dev))
2203 continue;
14d2bac1 2204
efdaedc4
TH
2205 ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
2206 rc = ata_dev_configure(dev);
2207 ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2208 if (rc)
2209 goto fail;
1da177e4
LT
2210 }
2211
e82cbdb9 2212 /* configure transfer mode */
3adcebb2 2213 rc = ata_set_mode(ap, &dev);
4ae72a1e 2214 if (rc)
51713d35 2215 goto fail;
1da177e4 2216
e82cbdb9
TH
2217 for (i = 0; i < ATA_MAX_DEVICES; i++)
2218 if (ata_dev_enabled(&ap->device[i]))
2219 return 0;
1da177e4 2220
e82cbdb9
TH
2221 /* no device present, disable port */
2222 ata_port_disable(ap);
1da177e4 2223 ap->ops->port_disable(ap);
96072e69 2224 return -ENODEV;
14d2bac1
TH
2225
2226 fail:
4ae72a1e
TH
2227 tries[dev->devno]--;
2228
14d2bac1
TH
2229 switch (rc) {
2230 case -EINVAL:
4ae72a1e 2231 /* eeek, something went very wrong, give up */
14d2bac1
TH
2232 tries[dev->devno] = 0;
2233 break;
4ae72a1e
TH
2234
2235 case -ENODEV:
2236 /* give it just one more chance */
2237 tries[dev->devno] = min(tries[dev->devno], 1);
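 /* -ENODEV falls through to the -EIO slow-down handling below */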
14d2bac1 2238 case -EIO:
4ae72a1e
TH
2239 if (tries[dev->devno] == 1) {
2240 /* This is the last chance, better to slow
2241 * down than lose it.
2242 */
2243 sata_down_spd_limit(ap);
2244 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2245 }
14d2bac1
TH
2246 }
2247
4ae72a1e 2248 if (!tries[dev->devno])
3373efd8 2249 ata_dev_disable(dev);
ec573755 2250
14d2bac1 2251 goto retry;
1da177e4
LT
2252}
2253
2254/**
0cba632b
JG
2255 * ata_port_probe - Mark port as enabled
2256 * @ap: Port for which we indicate enablement
1da177e4 2257 *
0cba632b
JG
2258 * Modify @ap data structure such that the system
2259 * thinks that the entire port is enabled.
2260 *
cca3974e 2261 * LOCKING: host lock, or some other form of
0cba632b 2262 * serialization.
1da177e4
LT
2263 */
2264
2265void ata_port_probe(struct ata_port *ap)
2266{
198e0fed 2267 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2268}
2269
3be680b7
TH
2270/**
2271 * sata_print_link_status - Print SATA link status
2272 * @ap: SATA port to printk link status about
2273 *
2274 * This function prints link speed and status of a SATA link.
2275 *
2276 * LOCKING:
2277 * None.
2278 */
43727fbc 2279void sata_print_link_status(struct ata_port *ap)
3be680b7 2280{
6d5f9732 2281 u32 sstatus, scontrol, tmp;
3be680b7 2282
81952c54 2283 if (sata_scr_read(ap, SCR_STATUS, &sstatus))
3be680b7 2284 return;
81952c54 2285 sata_scr_read(ap, SCR_CONTROL, &scontrol);
3be680b7 2286
81952c54 2287 if (ata_port_online(ap)) {
3be680b7 2288 tmp = (sstatus >> 4) & 0xf;
f15a1daf
TH
2289 ata_port_printk(ap, KERN_INFO,
2290 "SATA link up %s (SStatus %X SControl %X)\n",
2291 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2292 } else {
f15a1daf
TH
2293 ata_port_printk(ap, KERN_INFO,
2294 "SATA link down (SStatus %X SControl %X)\n",
2295 sstatus, scontrol);
3be680b7
TH
2296 }
2297}
2298
1da177e4 2299/**
780a87f7
JG
2300 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2301 * @ap: SATA port associated with target SATA PHY.
1da177e4 2302 *
780a87f7
JG
2303 * This function issues commands to standard SATA Sxxx
2304 * PHY registers, to wake up the phy (and device), and
2305 * clear any reset condition.
1da177e4
LT
2306 *
2307 * LOCKING:
0cba632b 2308 * PCI/etc. bus probe sem.
1da177e4
LT
2309 *
2310 */
2311void __sata_phy_reset(struct ata_port *ap)
2312{
2313 u32 sstatus;
2314 unsigned long timeout = jiffies + (HZ * 5);
2315
2316 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2317 /* issue phy wake/reset */
81952c54 2318 sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
62ba2841
TH
2319 /* Couldn't find anything in SATA I/II specs, but
2320 * AHCI-1.1 10.4.2 says at least 1 ms. */
2321 mdelay(1);
1da177e4 2322 }
81952c54
TH
2323 /* phy wake/clear reset */
2324 sata_scr_write_flush(ap, SCR_CONTROL, 0x300);
1da177e4
LT
2325
2326 /* wait for phy to become ready, if necessary */
2327 do {
2328 msleep(200);
81952c54 2329 sata_scr_read(ap, SCR_STATUS, &sstatus);
1da177e4
LT
2330 if ((sstatus & 0xf) != 1)
2331 break;
2332 } while (time_before(jiffies, timeout));
2333
3be680b7
TH
2334 /* print link status */
2335 sata_print_link_status(ap);
656563e3 2336
3be680b7 2337 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 2338 if (!ata_port_offline(ap))
1da177e4 2339 ata_port_probe(ap);
3be680b7 2340 else
1da177e4 2341 ata_port_disable(ap);
1da177e4 2342
198e0fed 2343 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2344 return;
2345
2346 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2347 ata_port_disable(ap);
2348 return;
2349 }
2350
2351 ap->cbl = ATA_CBL_SATA;
2352}
2353
2354/**
780a87f7
JG
2355 * sata_phy_reset - Reset SATA bus.
2356 * @ap: SATA port associated with target SATA PHY.
1da177e4 2357 *
780a87f7
JG
2358 * This function resets the SATA bus, and then probes
2359 * the bus for devices.
1da177e4
LT
2360 *
2361 * LOCKING:
0cba632b 2362 * PCI/etc. bus probe sem.
1da177e4
LT
2363 *
2364 */
2365void sata_phy_reset(struct ata_port *ap)
2366{
2367 __sata_phy_reset(ap);
198e0fed 2368 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2369 return;
2370 ata_bus_reset(ap);
2371}
2372
ebdfca6e
AC
2373/**
2374 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2375 * @adev: device
2376 *
2377 * Obtain the other device on the same cable, or if none is
2378 * present NULL is returned
2379 */
2e9edbf8 2380
3373efd8 2381struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2382{
3373efd8 2383 struct ata_port *ap = adev->ap;
ebdfca6e 2384 struct ata_device *pair = &ap->device[1 - adev->devno];
e1211e3f 2385 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2386 return NULL;
2387 return pair;
2388}
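/*
 * Usage sketch (illustrative): a PATA driver that must program
 * master/slave timings together can fetch the companion device with
 *
 *	struct ata_device *pair = ata_dev_pair(adev);
 *
 * A NULL return means @adev is alone on the cable, so its timings can
 * be programmed without considering a sibling.
 */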
2389
1da177e4 2390/**
780a87f7
JG
2391 * ata_port_disable - Disable port.
2392 * @ap: Port to be disabled.
1da177e4 2393 *
780a87f7
JG
2394 * Modify @ap data structure such that the system
2395 * thinks that the entire port is disabled, and should
2396 * never attempt to probe or communicate with devices
2397 * on this port.
2398 *
cca3974e 2399 * LOCKING: host lock, or some other form of
780a87f7 2400 * serialization.
1da177e4
LT
2401 */
2402
2403void ata_port_disable(struct ata_port *ap)
2404{
2405 ap->device[0].class = ATA_DEV_NONE;
2406 ap->device[1].class = ATA_DEV_NONE;
198e0fed 2407 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2408}
2409
1c3fae4d 2410/**
3c567b7d 2411 * sata_down_spd_limit - adjust SATA spd limit downward
1c3fae4d
TH
2412 * @ap: Port to adjust SATA spd limit for
2413 *
2414 * Adjust SATA spd limit of @ap downward. Note that this
2415 * function only adjusts the limit. The change must be applied
3c567b7d 2416 * using sata_set_spd().
1c3fae4d
TH
2417 *
2418 * LOCKING:
2419 * Inherited from caller.
2420 *
2421 * RETURNS:
2422 * 0 on success, negative errno on failure
2423 */
3c567b7d 2424int sata_down_spd_limit(struct ata_port *ap)
1c3fae4d 2425{
81952c54
TH
2426 u32 sstatus, spd, mask;
2427 int rc, highbit;
1c3fae4d 2428
81952c54
TH
2429 rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
2430 if (rc)
2431 return rc;
1c3fae4d
TH
2432
2433 mask = ap->sata_spd_limit;
2434 if (mask <= 1)
2435 return -EINVAL;
2436 highbit = fls(mask) - 1;
2437 mask &= ~(1 << highbit);
2438
81952c54 2439 spd = (sstatus >> 4) & 0xf;
1c3fae4d
TH
2440 if (spd <= 1)
2441 return -EINVAL;
2442 spd--;
2443 mask &= (1 << spd) - 1;
2444 if (!mask)
2445 return -EINVAL;
2446
2447 ap->sata_spd_limit = mask;
2448
f15a1daf
TH
2449 ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
2450 sata_spd_string(fls(mask)));
1c3fae4d
TH
2451
2452 return 0;
2453}
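/*
 * Worked example (from the code above): with sata_spd_limit == 0x3
 * (Gen1 and Gen2 allowed), highbit == 1 and clearing it leaves 0x1.
 * If SStatus reports the link at Gen2 (spd == 2), the subsequent
 * "mask &= (1 << spd) - 1" after spd-- also yields 0x1, so the new
 * limit permits 1.5 Gbps only.
 */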
2454
3c567b7d 2455static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
1c3fae4d
TH
2456{
2457 u32 spd, limit;
2458
2459 if (ap->sata_spd_limit == UINT_MAX)
2460 limit = 0;
2461 else
2462 limit = fls(ap->sata_spd_limit);
2463
2464 spd = (*scontrol >> 4) & 0xf;
2465 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2466
2467 return spd != limit;
2468}
2469
2470/**
3c567b7d 2471 * sata_set_spd_needed - is SATA spd configuration needed
1c3fae4d
TH
2472 * @ap: Port in question
2473 *
2474 * Test whether the spd limit in SControl matches
2475 * @ap->sata_spd_limit. This function is used to determine
2476 * whether hardreset is necessary to apply SATA spd
2477 * configuration.
2478 *
2479 * LOCKING:
2480 * Inherited from caller.
2481 *
2482 * RETURNS:
2483 * 1 if SATA spd configuration is needed, 0 otherwise.
2484 */
3c567b7d 2485int sata_set_spd_needed(struct ata_port *ap)
1c3fae4d
TH
2486{
2487 u32 scontrol;
2488
81952c54 2489 if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2490 return 0;
2491
3c567b7d 2492 return __sata_set_spd_needed(ap, &scontrol);
1c3fae4d
TH
2493}
2494
2495/**
3c567b7d 2496 * sata_set_spd - set SATA spd according to spd limit
1c3fae4d
TH
2497 * @ap: Port to set SATA spd for
2498 *
2499 * Set SATA spd of @ap according to sata_spd_limit.
2500 *
2501 * LOCKING:
2502 * Inherited from caller.
2503 *
2504 * RETURNS:
2505 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2506 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2507 */
3c567b7d 2508int sata_set_spd(struct ata_port *ap)
1c3fae4d
TH
2509{
2510 u32 scontrol;
81952c54 2511 int rc;
1c3fae4d 2512
81952c54
TH
2513 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
2514 return rc;
1c3fae4d 2515
3c567b7d 2516 if (!__sata_set_spd_needed(ap, &scontrol))
1c3fae4d
TH
2517 return 0;
2518
81952c54
TH
2519 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
2520 return rc;
2521
1c3fae4d
TH
2522 return 1;
2523}
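/*
 * Usage note (illustrative): the return value is tri-state. Negative
 * errno means the SCR registers were inaccessible, 0 means SControl
 * already matched the limit, and 1 means SControl was rewritten and a
 * hardreset is needed to renegotiate the link, which is why
 * sata_port_hardreset() below consults sata_set_spd_needed() first.
 */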
2524
452503f9
AC
2525/*
2526 * This mode timing computation functionality is ported over from
2527 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2528 */
2529/*
b352e57d 2530 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2531 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2532 * for UDMA6, which is currently supported only by Maxtor drives.
2533 *
2534 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2535 */
2536
2537static const struct ata_timing ata_timing[] = {
2538
2539 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2540 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2541 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2542 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2543
b352e57d
AC
2544 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2545 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2546 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2547 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2548 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2549
2550/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2551
452503f9
AC
2552 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2553 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2554 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2555
452503f9
AC
2556 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2557 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2558 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2559
b352e57d
AC
2560 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2561 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2562 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2563 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2564
2565 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2566 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2567 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2568
2569/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2570
2571 { 0xFF }
2572};
2573
2574#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2575#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2576
2577static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2578{
2579 q->setup = EZ(t->setup * 1000, T);
2580 q->act8b = EZ(t->act8b * 1000, T);
2581 q->rec8b = EZ(t->rec8b * 1000, T);
2582 q->cyc8b = EZ(t->cyc8b * 1000, T);
2583 q->active = EZ(t->active * 1000, T);
2584 q->recover = EZ(t->recover * 1000, T);
2585 q->cycle = EZ(t->cycle * 1000, T);
2586 q->udma = EZ(t->udma * 1000, UT);
2587}
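/*
 * Worked example (illustrative): T and UT are bus clock periods in
 * picoseconds while the ata_timing fields are in nanoseconds, hence
 * the "* 1000" above. A 70 ns PIO-0 setup time on a 33 MHz bus
 * (T = 30000) quantizes to
 *
 *	EZ(70 * 1000, 30000) = (70000 - 1) / 30000 + 1 = 3 clocks
 */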
2588
2589void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2590 struct ata_timing *m, unsigned int what)
2591{
2592 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2593 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2594 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2595 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2596 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2597 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2598 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2599 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2600}
2601
2602static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2603{
2604 const struct ata_timing *t;
2605
2606 for (t = ata_timing; t->mode != speed; t++)
91190758 2607 if (t->mode == 0xFF)
452503f9 2608 return NULL;
2e9edbf8 2609 return t;
452503f9
AC
2610}
2611
2612int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2613 struct ata_timing *t, int T, int UT)
2614{
2615 const struct ata_timing *s;
2616 struct ata_timing p;
2617
2618 /*
2e9edbf8 2619 * Find the mode.
75b1f2f8 2620 */
452503f9
AC
2621
2622 if (!(s = ata_timing_find_mode(speed)))
2623 return -EINVAL;
2624
75b1f2f8
AL
2625 memcpy(t, s, sizeof(*s));
2626
452503f9
AC
2627 /*
2628 * If the drive is an EIDE drive, it can tell us it needs extended
2629 * PIO/MW_DMA cycle timing.
2630 */
2631
2632 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2633 memset(&p, 0, sizeof(p));
2634 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2635 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2636 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2637 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2638 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2639 }
2640 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2641 }
2642
2643 /*
2644 * Convert the timing to bus clock counts.
2645 */
2646
75b1f2f8 2647 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2648
2649 /*
c893a3ae
RD
2650 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2651 * S.M.A.R.T. and some other commands. We have to ensure that the
2652 * DMA cycle timing is no faster than the fastest PIO timing.
452503f9
AC
2653 */
2654
fd3367af 2655 if (speed > XFER_PIO_6) {
452503f9
AC
2656 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2657 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2658 }
2659
2660 /*
c893a3ae 2661 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2662 */
2663
2664 if (t->act8b + t->rec8b < t->cyc8b) {
2665 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2666 t->rec8b = t->cyc8b - t->act8b;
2667 }
2668
2669 if (t->active + t->recover < t->cycle) {
2670 t->active += (t->cycle - (t->active + t->recover)) / 2;
2671 t->recover = t->cycle - t->active;
2672 }
4f701d1e
AC
2673
2674 /* In a few cases quantisation may produce enough errors to
2675 leave t->cycle too low for the sum of active and recovery
2676 if so we must correct this */
2677 if (t->active + t->recover > t->cycle)
2678 t->cycle = t->active + t->recover;
452503f9
AC
2679
2680 return 0;
2681}
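/*
 * Illustrative call (hypothetical driver, clock value assumed): for a
 * controller clocked at 33.333 MHz a driver might do
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	(clock period in picoseconds)
 *	int rc = ata_timing_compute(adev, adev->pio_mode, &t, T, T);
 *
 * and then write t.active, t.recover etc., now in clock counts, into
 * its timing registers.
 */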
2682
cf176e1a
TH
2683/**
2684 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2685 * @dev: Device to adjust xfer masks
458337db 2686 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2687 *
2688 * Adjust xfer masks of @dev downward. Note that this function
2689 * does not apply the change. Invoking ata_set_mode() afterwards
2690 * will apply the limit.
2691 *
2692 * LOCKING:
2693 * Inherited from caller.
2694 *
2695 * RETURNS:
2696 * 0 on success, negative errno on failure
2697 */
458337db 2698int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2699{
458337db
TH
2700 char buf[32];
2701 unsigned int orig_mask, xfer_mask;
2702 unsigned int pio_mask, mwdma_mask, udma_mask;
2703 int quiet, highbit;
cf176e1a 2704
458337db
TH
2705 quiet = !!(sel & ATA_DNXFER_QUIET);
2706 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2707
458337db
TH
2708 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2709 dev->mwdma_mask,
2710 dev->udma_mask);
2711 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2712
458337db
TH
2713 switch (sel) {
2714 case ATA_DNXFER_PIO:
2715 highbit = fls(pio_mask) - 1;
2716 pio_mask &= ~(1 << highbit);
2717 break;
2718
2719 case ATA_DNXFER_DMA:
2720 if (udma_mask) {
2721 highbit = fls(udma_mask) - 1;
2722 udma_mask &= ~(1 << highbit);
2723 if (!udma_mask)
2724 return -ENOENT;
2725 } else if (mwdma_mask) {
2726 highbit = fls(mwdma_mask) - 1;
2727 mwdma_mask &= ~(1 << highbit);
2728 if (!mwdma_mask)
2729 return -ENOENT;
2730 }
2731 break;
2732
2733 case ATA_DNXFER_40C:
2734 udma_mask &= ATA_UDMA_MASK_40C;
2735 break;
2736
2737 case ATA_DNXFER_FORCE_PIO0:
2738 pio_mask &= 1;
2739 case ATA_DNXFER_FORCE_PIO:
2740 mwdma_mask = 0;
2741 udma_mask = 0;
2742 break;
2743
458337db
TH
2744 default:
2745 BUG();
2746 }
2747
2748 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2749
2750 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2751 return -ENOENT;
2752
2753 if (!quiet) {
2754 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2755 snprintf(buf, sizeof(buf), "%s:%s",
2756 ata_mode_string(xfer_mask),
2757 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2758 else
2759 snprintf(buf, sizeof(buf), "%s",
2760 ata_mode_string(xfer_mask));
2761
2762 ata_dev_printk(dev, KERN_WARNING,
2763 "limiting speed to %s\n", buf);
2764 }
cf176e1a
TH
2765
2766 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2767 &dev->udma_mask);
2768
cf176e1a 2769 return 0;
cf176e1a
TH
2770}
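/*
 * Usage note (illustrative): ata_bus_probe() above drops one PIO mode
 * on a device's last retry via ata_down_xfermask_limit(dev,
 * ATA_DNXFER_PIO); or-ing ATA_DNXFER_QUIET into @sel performs the
 * same limiting without the "limiting speed to ..." warning.
 */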
2771
3373efd8 2772static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2773{
baa1e78a 2774 struct ata_eh_context *ehc = &dev->ap->eh_context;
83206a29
TH
2775 unsigned int err_mask;
2776 int rc;
1da177e4 2777
e8384607 2778 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2779 if (dev->xfer_shift == ATA_SHIFT_PIO)
2780 dev->flags |= ATA_DFLAG_PIO;
2781
3373efd8 2782 err_mask = ata_dev_set_xfermode(dev);
11750a40
AC
2783 /* Old CFA may refuse this command, which is just fine */
2784 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2785 err_mask &= ~AC_ERR_DEV;
2786
83206a29 2787 if (err_mask) {
f15a1daf
TH
2788 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2789 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2790 return -EIO;
2791 }
1da177e4 2792
baa1e78a 2793 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2794 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2795 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2796 if (rc)
83206a29 2797 return rc;
48a8a14f 2798
23e71c3d
TH
2799 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2800 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2801
f15a1daf
TH
2802 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2803 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2804 return 0;
1da177e4
LT
2805}
2806
1da177e4 2807/**
04351821 2808 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
1da177e4 2809 * @ap: port on which timings will be programmed
e82cbdb9 2810 * @r_failed_dev: out parameter for failed device
1da177e4 2811 *
04351821
AC
2812 * Standard implementation of the function used to tune and set
2813 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2814 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2815 * returned in @r_failed_dev.
780a87f7 2816 *
1da177e4 2817 * LOCKING:
0cba632b 2818 * PCI/etc. bus probe sem.
e82cbdb9
TH
2819 *
2820 * RETURNS:
2821 * 0 on success, negative errno otherwise
1da177e4 2822 */
04351821
AC
2823
2824int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
1da177e4 2825{
e8e0619f 2826 struct ata_device *dev;
e82cbdb9 2827 int i, rc = 0, used_dma = 0, found = 0;
1da177e4 2828
3adcebb2 2829
a6d5a51c
TH
2830 /* step 1: calculate xfer_mask */
2831 for (i = 0; i < ATA_MAX_DEVICES; i++) {
acf356b1 2832 unsigned int pio_mask, dma_mask;
a6d5a51c 2833
e8e0619f
TH
2834 dev = &ap->device[i];
2835
e1211e3f 2836 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2837 continue;
2838
3373efd8 2839 ata_dev_xfermask(dev);
1da177e4 2840
acf356b1
TH
2841 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2842 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2843 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2844 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2845
4f65977d 2846 found = 1;
5444a6f4
AC
2847 if (dev->dma_mode)
2848 used_dma = 1;
a6d5a51c 2849 }
4f65977d 2850 if (!found)
e82cbdb9 2851 goto out;
a6d5a51c
TH
2852
2853 /* step 2: always set host PIO timings */
e8e0619f
TH
2854 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2855 dev = &ap->device[i];
2856 if (!ata_dev_enabled(dev))
2857 continue;
2858
2859 if (!dev->pio_mode) {
f15a1daf 2860 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2861 rc = -EINVAL;
e82cbdb9 2862 goto out;
e8e0619f
TH
2863 }
2864
2865 dev->xfer_mode = dev->pio_mode;
2866 dev->xfer_shift = ATA_SHIFT_PIO;
2867 if (ap->ops->set_piomode)
2868 ap->ops->set_piomode(ap, dev);
2869 }
1da177e4 2870
a6d5a51c 2871 /* step 3: set host DMA timings */
e8e0619f
TH
2872 for (i = 0; i < ATA_MAX_DEVICES; i++) {
2873 dev = &ap->device[i];
2874
2875 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2876 continue;
2877
2878 dev->xfer_mode = dev->dma_mode;
2879 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2880 if (ap->ops->set_dmamode)
2881 ap->ops->set_dmamode(ap, dev);
2882 }
1da177e4
LT
2883
2884 /* step 4: update devices' xfer mode */
83206a29 2885 for (i = 0; i < ATA_MAX_DEVICES; i++) {
e8e0619f 2886 dev = &ap->device[i];
1da177e4 2887
18d90deb 2888 /* don't update suspended devices' xfer mode */
9666f400 2889 if (!ata_dev_enabled(dev))
83206a29
TH
2890 continue;
2891
3373efd8 2892 rc = ata_dev_set_mode(dev);
5bbc53f4 2893 if (rc)
e82cbdb9 2894 goto out;
83206a29 2895 }
1da177e4 2896
e8e0619f
TH
2897 /* Record simplex status. If we selected DMA then the other
2898 * host channels are not permitted to do so.
5444a6f4 2899 */
cca3974e 2900 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2901 ap->host->simplex_claimed = ap;
5444a6f4 2902
e8e0619f 2903 /* step 5: chip-specific finalisation */
1da177e4
LT
2904 if (ap->ops->post_set_mode)
2905 ap->ops->post_set_mode(ap);
e82cbdb9
TH
2906 out:
2907 if (rc)
2908 *r_failed_dev = dev;
2909 return rc;
1da177e4
LT
2910}
2911
04351821
AC
2912/**
2913 * ata_set_mode - Program timings and issue SET FEATURES - XFER
2914 * @ap: port on which timings will be programmed
2915 * @r_failed_dev: out parameter for failed device
2916 *
2917 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2918 * ata_set_mode() fails, pointer to the failing device is
2919 * returned in @r_failed_dev.
2920 *
2921 * LOCKING:
2922 * PCI/etc. bus probe sem.
2923 *
2924 * RETURNS:
2925 * 0 on success, negative errno otherwise
2926 */
2927int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
2928{
2929 /* has private set_mode? */
2930 if (ap->ops->set_mode)
2931 return ap->ops->set_mode(ap, r_failed_dev);
2932 return ata_do_set_mode(ap, r_failed_dev);
2933}
2934
1fdffbce
JG
2935/**
2936 * ata_tf_to_host - issue ATA taskfile to host controller
2937 * @ap: port to which command is being issued
2938 * @tf: ATA taskfile register set
2939 *
2940 * Issues ATA taskfile register set to ATA host controller,
2941 * with proper synchronization with interrupt handler and
2942 * other threads.
2943 *
2944 * LOCKING:
cca3974e 2945 * spin_lock_irqsave(host lock)
1fdffbce
JG
2946 */
2947
2948static inline void ata_tf_to_host(struct ata_port *ap,
2949 const struct ata_taskfile *tf)
2950{
2951 ap->ops->tf_load(ap, tf);
2952 ap->ops->exec_command(ap, tf);
2953}
2954
1da177e4
LT
2955/**
2956 * ata_busy_sleep - sleep until BSY clears, or timeout
2957 * @ap: port containing status register to be polled
2958 * @tmout_pat: impatience timeout
2959 * @tmout: overall timeout
2960 *
780a87f7
JG
2961 * Sleep until ATA Status register bit BSY clears,
2962 * or a timeout occurs.
2963 *
d1adc1bb
TH
2964 * LOCKING:
2965 * Kernel thread context (may sleep).
2966 *
2967 * RETURNS:
2968 * 0 on success, -errno otherwise.
1da177e4 2969 */
d1adc1bb
TH
2970int ata_busy_sleep(struct ata_port *ap,
2971 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2972{
2973 unsigned long timer_start, timeout;
2974 u8 status;
2975
2976 status = ata_busy_wait(ap, ATA_BUSY, 300);
2977 timer_start = jiffies;
2978 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2979 while (status != 0xff && (status & ATA_BUSY) &&
2980 time_before(jiffies, timeout)) {
1da177e4
LT
2981 msleep(50);
2982 status = ata_busy_wait(ap, ATA_BUSY, 3);
2983 }
2984
d1adc1bb 2985 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2986 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2987 "port is slow to respond, please be patient "
2988 "(Status 0x%x)\n", status);
1da177e4
LT
2989
2990 timeout = timer_start + tmout;
d1adc1bb
TH
2991 while (status != 0xff && (status & ATA_BUSY) &&
2992 time_before(jiffies, timeout)) {
1da177e4
LT
2993 msleep(50);
2994 status = ata_chk_status(ap);
2995 }
2996
d1adc1bb
TH
2997 if (status == 0xff)
2998 return -ENODEV;
2999
1da177e4 3000 if (status & ATA_BUSY) {
f15a1daf 3001 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3002 "(%lu secs, Status 0x%x)\n",
3003 tmout / HZ, status);
d1adc1bb 3004 return -EBUSY;
1da177e4
LT
3005 }
3006
3007 return 0;
3008}
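/*
 * Usage sketch (taken from __sata_phy_reset() above): wait with a
 * short "impatience" timeout before the overall one,
 *
 *	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
 *		ata_port_disable(ap);
 *		return;
 *	}
 *
 * i.e. whine after @tmout_pat and give up with -EBUSY or -ENODEV only
 * after @tmout expires.
 */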
3009
d4b2bab4
TH
3010/**
3011 * ata_wait_ready - sleep until BSY clears, or timeout
3012 * @ap: port containing status register to be polled
3013 * @deadline: deadline jiffies for the operation
3014 *
3015 * Sleep until ATA Status register bit BSY clears, or timeout
3016 * occurs.
3017 *
3018 * LOCKING:
3019 * Kernel thread context (may sleep).
3020 *
3021 * RETURNS:
3022 * 0 on success, -errno otherwise.
3023 */
3024int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3025{
3026 unsigned long start = jiffies;
3027 int warned = 0;
3028
3029 while (1) {
3030 u8 status = ata_chk_status(ap);
3031 unsigned long now = jiffies;
3032
3033 if (!(status & ATA_BUSY))
3034 return 0;
3035 if (status == 0xff)
3036 return -ENODEV;
3037 if (time_after(now, deadline))
3038 return -EBUSY;
3039
3040 if (!warned && time_after(now, start + 5 * HZ) &&
3041 (deadline - now > 3 * HZ)) {
3042 ata_port_printk(ap, KERN_WARNING,
3043 "port is slow to respond, please be patient "
3044 "(Status 0x%x)\n", status);
3045 warned = 1;
3046 }
3047
3048 msleep(50);
3049 }
3050}
3051
3052static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3053 unsigned long deadline)
1da177e4
LT
3054{
3055 struct ata_ioports *ioaddr = &ap->ioaddr;
3056 unsigned int dev0 = devmask & (1 << 0);
3057 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3058 int rc, ret = 0;
1da177e4
LT
3059
3060 /* if device 0 was found in ata_devchk, wait for its
3061 * BSY bit to clear
3062 */
d4b2bab4
TH
3063 if (dev0) {
3064 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3065 if (rc) {
3066 if (rc != -ENODEV)
3067 return rc;
3068 ret = rc;
3069 }
d4b2bab4 3070 }
1da177e4
LT
3071
3072 /* if device 1 was found in ata_devchk, wait for
3073 * register access, then wait for BSY to clear
3074 */
1da177e4
LT
3075 while (dev1) {
3076 u8 nsect, lbal;
3077
3078 ap->ops->dev_select(ap, 1);
0d5ff566
TH
3079 nsect = ioread8(ioaddr->nsect_addr);
3080 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
3081 if ((nsect == 1) && (lbal == 1))
3082 break;
d4b2bab4
TH
3083 if (time_after(jiffies, deadline))
3084 return -EBUSY;
1da177e4
LT
3085 msleep(50); /* give drive a breather */
3086 }
d4b2bab4
TH
3087 if (dev1) {
3088 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3089 if (rc) {
3090 if (rc != -ENODEV)
3091 return rc;
3092 ret = rc;
3093 }
d4b2bab4 3094 }
1da177e4
LT
3095
3096 /* is all this really necessary? */
3097 ap->ops->dev_select(ap, 0);
3098 if (dev1)
3099 ap->ops->dev_select(ap, 1);
3100 if (dev0)
3101 ap->ops->dev_select(ap, 0);
d4b2bab4 3102
9b89391c 3103 return ret;
1da177e4
LT
3104}
3105
d4b2bab4
TH
3106static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3107 unsigned long deadline)
1da177e4
LT
3108{
3109 struct ata_ioports *ioaddr = &ap->ioaddr;
3110
44877b4e 3111 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3112
3113 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3114 iowrite8(ap->ctl, ioaddr->ctl_addr);
3115 udelay(20); /* FIXME: flush */
3116 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3117 udelay(20); /* FIXME: flush */
3118 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3119
3120 /* spec mandates ">= 2ms" before checking status.
3121 * We wait 150ms, because that was the magic delay used for
3122 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3123 * between when the ATA command register is written, and then
3124 * status is checked. Because waiting for "a while" before
3125 * checking status is fine, post SRST, we perform this magic
3126 * delay here as well.
09c7ad79
AC
3127 *
3128 * Old drivers/ide used the 2ms rule and then waited for ready
1da177e4
LT
3129 */
3130 msleep(150);
3131
2e9edbf8 3132 /* Before we perform post reset processing we want to see if
298a41ca
TH
3133 * the bus shows 0xFF because the odd clown forgets the D7
3134 * pulldown resistor.
3135 */
d1adc1bb 3136 if (ata_check_status(ap) == 0xFF)
9b89391c 3137 return -ENODEV;
09c7ad79 3138
d4b2bab4 3139 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3140}
3141
3142/**
3143 * ata_bus_reset - reset host port and associated ATA channel
3144 * @ap: port to reset
3145 *
3146 * This is typically the first time we actually start issuing
3147 * commands to the ATA channel. We wait for BSY to clear, then
3148 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3149 * result. Determine what devices, if any, are on the channel
3150 * by looking at the device 0/1 error register. Look at the signature
3151 * stored in each device's taskfile registers, to determine if
3152 * the device is ATA or ATAPI.
3153 *
3154 * LOCKING:
0cba632b 3155 * PCI/etc. bus probe sem.
cca3974e 3156 * Obtains host lock.
1da177e4
LT
3157 *
3158 * SIDE EFFECTS:
198e0fed 3159 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3160 */
3161
3162void ata_bus_reset(struct ata_port *ap)
3163{
3164 struct ata_ioports *ioaddr = &ap->ioaddr;
3165 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3166 u8 err;
aec5c3c1 3167 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3168 int rc;
1da177e4 3169
44877b4e 3170 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3171
3172 /* determine if device 0/1 are present */
3173 if (ap->flags & ATA_FLAG_SATA_RESET)
3174 dev0 = 1;
3175 else {
3176 dev0 = ata_devchk(ap, 0);
3177 if (slave_possible)
3178 dev1 = ata_devchk(ap, 1);
3179 }
3180
3181 if (dev0)
3182 devmask |= (1 << 0);
3183 if (dev1)
3184 devmask |= (1 << 1);
3185
3186 /* select device 0 again */
3187 ap->ops->dev_select(ap, 0);
3188
3189 /* issue bus reset */
9b89391c
TH
3190 if (ap->flags & ATA_FLAG_SRST) {
3191 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3192 if (rc && rc != -ENODEV)
aec5c3c1 3193 goto err_out;
9b89391c 3194 }
1da177e4
LT
3195
3196 /*
3197 * determine by signature whether we have ATA or ATAPI devices
3198 */
b4dc7623 3199 ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
1da177e4 3200 if ((slave_possible) && (err != 0x81))
b4dc7623 3201 ap->device[1].class = ata_dev_try_classify(ap, 1, &err);
1da177e4
LT
3202
3203 /* re-enable interrupts */
83625006 3204 ap->ops->irq_on(ap);
1da177e4
LT
3205
3206 /* is double-select really necessary? */
3207 if (ap->device[1].class != ATA_DEV_NONE)
3208 ap->ops->dev_select(ap, 1);
3209 if (ap->device[0].class != ATA_DEV_NONE)
3210 ap->ops->dev_select(ap, 0);
3211
3212 /* if no devices were detected, disable this port */
3213 if ((ap->device[0].class == ATA_DEV_NONE) &&
3214 (ap->device[1].class == ATA_DEV_NONE))
3215 goto err_out;
3216
3217 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3218 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3219 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3220 }
3221
3222 DPRINTK("EXIT\n");
3223 return;
3224
3225err_out:
f15a1daf 3226 ata_port_printk(ap, KERN_ERR, "disabling port\n");
1da177e4
LT
3227 ap->ops->port_disable(ap);
3228
3229 DPRINTK("EXIT\n");
3230}
3231
d7bb4cc7
TH
3232/**
3233 * sata_phy_debounce - debounce SATA phy status
3234 * @ap: ATA port to debounce SATA phy status for
3235 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3236 * @deadline: deadline jiffies for the operation
d7bb4cc7
TH
3237 *
3238 * Make sure SStatus of @ap reaches stable state, determined by
3239 * holding the same value where DET is not 1 for @duration polled
3240 * every @interval, before @timeout. Timeout constrains the
d4b2bab4
TH
3241 * beginning of the stable state. Because DET gets stuck at 1 on
3242 * some controllers after hot unplugging, this function waits
d7bb4cc7
TH
3243 * until timeout and then returns 0 if DET is stable at 1.
3244 *
d4b2bab4
TH
3245 * @timeout is further limited by @deadline. The sooner of the
3246 * two is used.
3247 *
d7bb4cc7
TH
3248 * LOCKING:
3249 * Kernel thread context (may sleep)
3250 *
3251 * RETURNS:
3252 * 0 on success, -errno on failure.
3253 */
d4b2bab4
TH
3254int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
3255 unsigned long deadline)
7a7921e8 3256{
d7bb4cc7 3257 unsigned long interval_msec = params[0];
d4b2bab4
TH
3258 unsigned long duration = msecs_to_jiffies(params[1]);
3259 unsigned long last_jiffies, t;
d7bb4cc7
TH
3260 u32 last, cur;
3261 int rc;
3262
d4b2bab4
TH
3263 t = jiffies + msecs_to_jiffies(params[2]);
3264 if (time_before(t, deadline))
3265 deadline = t;
3266
d7bb4cc7
TH
3267 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3268 return rc;
3269 cur &= 0xf;
3270
3271 last = cur;
3272 last_jiffies = jiffies;
3273
3274 while (1) {
3275 msleep(interval_msec);
3276 if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
3277 return rc;
3278 cur &= 0xf;
3279
3280 /* DET stable? */
3281 if (cur == last) {
d4b2bab4 3282 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3283 continue;
3284 if (time_after(jiffies, last_jiffies + duration))
3285 return 0;
3286 continue;
3287 }
3288
3289 /* unstable, start over */
3290 last = cur;
3291 last_jiffies = jiffies;
3292
d4b2bab4
TH
3293 /* check deadline */
3294 if (time_after(jiffies, deadline))
d7bb4cc7
TH
3295 return -EBUSY;
3296 }
3297}
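/*
 * Illustrative example (array values assumed): with @params laid out
 * as { interval, duration, timeout } in msec,
 *
 *	static const unsigned long deb[] = { 25, 500, 2000 };
 *	rc = sata_phy_debounce(ap, deb, jiffies + 5 * HZ);
 *
 * polls SStatus every 25 ms and succeeds once DET has held a stable
 * non-1 value for 500 ms, giving up after 2 s or at @deadline,
 * whichever comes first.
 */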
3298
3299/**
3300 * sata_phy_resume - resume SATA phy
3301 * @ap: ATA port to resume SATA phy for
3302 * @params: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3303 * @deadline: deadline jiffies for the operation
d7bb4cc7
TH
3304 *
3305 * Resume SATA phy of @ap and debounce it.
3306 *
3307 * LOCKING:
3308 * Kernel thread context (may sleep)
3309 *
3310 * RETURNS:
3311 * 0 on success, -errno on failure.
3312 */
d4b2bab4
TH
3313int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
3314 unsigned long deadline)
d7bb4cc7
TH
3315{
3316 u32 scontrol;
81952c54
TH
3317 int rc;
3318
3319 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
3320 return rc;
7a7921e8 3321
852ee16a 3322 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54
TH
3323
3324 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
3325 return rc;
7a7921e8 3326
d7bb4cc7
TH
3327 /* Some PHYs react badly if SStatus is pounded immediately
3328 * after resuming. Delay 200ms before debouncing.
3329 */
3330 msleep(200);
7a7921e8 3331
d4b2bab4 3332 return sata_phy_debounce(ap, params, deadline);
7a7921e8
TH
3333}
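/*
 * Note on the SControl values used here (illustrative): bits 3:0 form
 * the DET field, so the "| 0x300" above requests DET = 0 (no action,
 * phy online) and sets the IPM field (bits 11:8) to 3, disabling
 * partial/slumber power-management transitions. sata_port_hardreset()
 * below writes "| 0x301" (DET = 1, perform COMRESET) and "| 0x304"
 * (DET = 4, take the interface offline) to the same register.
 */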
3334
f5914a46
TH
3335/**
3336 * ata_std_prereset - prepare for reset
3337 * @ap: ATA port to be reset
d4b2bab4 3338 * @deadline: deadline jiffies for the operation
f5914a46 3339 *
b8cffc6a
TH
3340 * @ap is about to be reset. Initialize it. Failure from
3341 * prereset makes libata abort whole reset sequence and give up
3342 * that port, so prereset should be best-effort. It does its
3343 * best to prepare for reset sequence but if things go wrong, it
3344 * should just whine, not fail.
f5914a46
TH
3345 *
3346 * LOCKING:
3347 * Kernel thread context (may sleep)
3348 *
3349 * RETURNS:
3350 * 0 on success, -errno otherwise.
3351 */
d4b2bab4 3352int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
f5914a46
TH
3353{
3354 struct ata_eh_context *ehc = &ap->eh_context;
e9c83914 3355 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3356 int rc;
3357
31daabda 3358 /* handle link resume */
28324304
TH
3359 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
3360 (ap->flags & ATA_FLAG_HRST_TO_RESUME))
3361 ehc->i.action |= ATA_EH_HARDRESET;
3362
f5914a46
TH
3363 /* if we're about to do hardreset, nothing more to do */
3364 if (ehc->i.action & ATA_EH_HARDRESET)
3365 return 0;
3366
3367 /* if SATA, resume phy */
3368 if (ap->cbl == ATA_CBL_SATA) {
d4b2bab4 3369 rc = sata_phy_resume(ap, timing, deadline);
b8cffc6a
TH
3370 /* whine about phy resume failure but proceed */
3371 if (rc && rc != -EOPNOTSUPP)
f5914a46
TH
3372 ata_port_printk(ap, KERN_WARNING, "failed to resume "
3373 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3374 }
3375
3376 /* Wait for !BSY if the controller can wait for the first D2H
3377 * Reg FIS and we don't know that no device is attached.
3378 */
b8cffc6a
TH
3379 if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
3380 rc = ata_wait_ready(ap, deadline);
3381 if (rc) {
3382 ata_port_printk(ap, KERN_WARNING, "device not ready "
3383 "(errno=%d), forcing hardreset\n", rc);
3384 ehc->i.action |= ATA_EH_HARDRESET;
3385 }
3386 }
f5914a46
TH
3387
3388 return 0;
3389}
3390
c2bd5804
TH
3391/**
3392 * ata_std_softreset - reset host port via ATA SRST
3393 * @ap: port to reset
c2bd5804 3394 * @classes: resulting classes of attached devices
d4b2bab4 3395 * @deadline: deadline jiffies for the operation
c2bd5804 3396 *
52783c5d 3397 * Reset host port using ATA SRST.
c2bd5804
TH
3398 *
3399 * LOCKING:
3400 * Kernel thread context (may sleep)
3401 *
3402 * RETURNS:
3403 * 0 on success, -errno otherwise.
3404 */
d4b2bab4
TH
3405int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
3406 unsigned long deadline)
c2bd5804
TH
3407{
3408 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3409 unsigned int devmask = 0;
3410 int rc;
c2bd5804
TH
3411 u8 err;
3412
3413 DPRINTK("ENTER\n");
3414
81952c54 3415 if (ata_port_offline(ap)) {
3a39746a
TH
3416 classes[0] = ATA_DEV_NONE;
3417 goto out;
3418 }
3419
c2bd5804
TH
3420 /* determine if device 0/1 are present */
3421 if (ata_devchk(ap, 0))
3422 devmask |= (1 << 0);
3423 if (slave_possible && ata_devchk(ap, 1))
3424 devmask |= (1 << 1);
3425
c2bd5804
TH
3426 /* select device 0 again */
3427 ap->ops->dev_select(ap, 0);
3428
3429 /* issue bus reset */
3430 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3431 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c
TH
3432 /* if link is occupied, -ENODEV too is an error */
3433 if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
d4b2bab4
TH
3434 ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
3435 return rc;
c2bd5804
TH
3436 }
3437
3438 /* determine by signature whether we have ATA or ATAPI devices */
3439 classes[0] = ata_dev_try_classify(ap, 0, &err);
3440 if (slave_possible && err != 0x81)
3441 classes[1] = ata_dev_try_classify(ap, 1, &err);
3442
3a39746a 3443 out:
c2bd5804
TH
3444 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3445 return 0;
3446}
3447
3448/**
b6103f6d 3449 * sata_port_hardreset - reset port via SATA phy reset
c2bd5804 3450 * @ap: port to reset
b6103f6d 3451 * @timing: timing parameters { interval, duration, timeout } in msec
d4b2bab4 3452 * @deadline: deadline jiffies for the operation
c2bd5804
TH
3453 *
3454 * SATA phy-reset host port using DET bits of SControl register.
c2bd5804
TH
3455 *
3456 * LOCKING:
3457 * Kernel thread context (may sleep)
3458 *
3459 * RETURNS:
3460 * 0 on success, -errno otherwise.
3461 */
d4b2bab4
TH
3462int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
3463 unsigned long deadline)
c2bd5804 3464{
852ee16a 3465 u32 scontrol;
81952c54 3466 int rc;
852ee16a 3467
c2bd5804
TH
3468 DPRINTK("ENTER\n");
3469
3c567b7d 3470 if (sata_set_spd_needed(ap)) {
1c3fae4d
TH
3471 /* SATA spec says nothing about how to reconfigure
3472 * spd. To be on the safe side, turn off phy during
3473 * reconfiguration. This works for at least ICH7 AHCI
3474 * and Sil3124.
3475 */
81952c54 3476 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3477 goto out;
81952c54 3478
a34b6fc0 3479 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54
TH
3480
3481 if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
b6103f6d 3482 goto out;
1c3fae4d 3483
3c567b7d 3484 sata_set_spd(ap);
1c3fae4d
TH
3485 }
3486
3487 /* issue phy wake/reset */
81952c54 3488 if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
b6103f6d 3489 goto out;
81952c54 3490
852ee16a 3491 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54
TH
3492
3493 if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
b6103f6d 3494 goto out;
c2bd5804 3495
1c3fae4d 3496 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3497 * 10.4.2 says at least 1 ms.
3498 */
3499 msleep(1);
3500
1c3fae4d 3501 /* bring phy back */
d4b2bab4 3502 rc = sata_phy_resume(ap, timing, deadline);
b6103f6d
TH
3503 out:
3504 DPRINTK("EXIT, rc=%d\n", rc);
3505 return rc;
3506}
3507
3508/**
3509 * sata_std_hardreset - reset host port via SATA phy reset
3510 * @ap: port to reset
3511 * @class: resulting class of attached device
d4b2bab4 3512 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3513 *
3514 * SATA phy-reset host port using DET bits of SControl register,
3515 * wait for !BSY and classify the attached device.
3516 *
3517 * LOCKING:
3518 * Kernel thread context (may sleep)
3519 *
3520 * RETURNS:
3521 * 0 on success, -errno otherwise.
3522 */
d4b2bab4
TH
3523int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
3524 unsigned long deadline)
b6103f6d
TH
3525{
3526 const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
3527 int rc;
3528
3529 DPRINTK("ENTER\n");
3530
3531 /* do hardreset */
d4b2bab4 3532 rc = sata_port_hardreset(ap, timing, deadline);
b6103f6d
TH
3533 if (rc) {
3534 ata_port_printk(ap, KERN_ERR,
3535 "COMRESET failed (errno=%d)\n", rc);
3536 return rc;
3537 }
c2bd5804 3538
c2bd5804 3539 /* TODO: phy layer with polling, timeouts, etc. */
81952c54 3540 if (ata_port_offline(ap)) {
c2bd5804
TH
3541 *class = ATA_DEV_NONE;
3542 DPRINTK("EXIT, link offline\n");
3543 return 0;
3544 }
3545
34fee227
TH
3546 /* wait a while before checking status, see SRST for more info */
3547 msleep(150);
3548
d4b2bab4 3549 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3550 /* link occupied, -ENODEV too is an error */
3551 if (rc) {
f15a1daf 3552 ata_port_printk(ap, KERN_ERR,
d4b2bab4
TH
3553 "COMRESET failed (errno=%d)\n", rc);
3554 return rc;
c2bd5804
TH
3555 }
3556
3a39746a
TH
3557 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3558
c2bd5804
TH
3559 *class = ata_dev_try_classify(ap, 0, NULL);
3560
3561 DPRINTK("EXIT, class=%u\n", *class);
3562 return 0;
3563}
3564
3565/**
3566 * ata_std_postreset - standard postreset callback
3567 * @ap: the target ata_port
3568 * @classes: classes of attached devices
3569 *
3570 * This function is invoked after a successful reset. Note that
3571 * the device might have been reset more than once using
3572 * different reset methods before postreset is invoked.
c2bd5804 3573 *
c2bd5804
TH
3574 * LOCKING:
3575 * Kernel thread context (may sleep)
3576 */
3577void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
3578{
dc2b3515
TH
3579 u32 serror;
3580
c2bd5804
TH
3581 DPRINTK("ENTER\n");
3582
c2bd5804 3583 /* print link status */
81952c54 3584 sata_print_link_status(ap);
c2bd5804 3585
dc2b3515
TH
3586 /* clear SError */
3587 if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
3588 sata_scr_write(ap, SCR_ERROR, serror);
3589
3a39746a 3590 /* re-enable interrupts */
83625006
AI
3591 if (!ap->ops->error_handler)
3592 ap->ops->irq_on(ap);
c2bd5804
TH
3593
3594 /* is double-select really necessary? */
3595 if (classes[0] != ATA_DEV_NONE)
3596 ap->ops->dev_select(ap, 1);
3597 if (classes[1] != ATA_DEV_NONE)
3598 ap->ops->dev_select(ap, 0);
3599
3a39746a
TH
3600 /* bail out if no device is present */
3601 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3602 DPRINTK("EXIT, no device\n");
3603 return;
3604 }
3605
3606 /* set up device control */
0d5ff566
TH
3607 if (ap->ioaddr.ctl_addr)
3608 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3609
3610 DPRINTK("EXIT\n");
3611}
3612
623a3128
TH
3613/**
3614 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3615 * @dev: device to compare against
3616 * @new_class: class of the new device
3617 * @new_id: IDENTIFY page of the new device
3618 *
3619 * Compare @new_class and @new_id against @dev and determine
3620 * whether @dev is the device indicated by @new_class and
3621 * @new_id.
3622 *
3623 * LOCKING:
3624 * None.
3625 *
3626 * RETURNS:
3627 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3628 */
3373efd8
TH
3629static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3630 const u16 *new_id)
623a3128
TH
3631{
3632 const u16 *old_id = dev->id;
a0cf733b
TH
3633 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3634 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3635 u64 new_n_sectors;
3636
3637 if (dev->class != new_class) {
f15a1daf
TH
3638 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3639 dev->class, new_class);
623a3128
TH
3640 return 0;
3641 }
3642
a0cf733b
TH
3643 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3644 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3645 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3646 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3647 new_n_sectors = ata_id_n_sectors(new_id);
3648
3649 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3650 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3651 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3652 return 0;
3653 }
3654
3655 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3656 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3657 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3658 return 0;
3659 }
3660
3661 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3662 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3663 "%llu != %llu\n",
3664 (unsigned long long)dev->n_sectors,
3665 (unsigned long long)new_n_sectors);
1e999736
AC
3666 /* If the new size matches the boot-time size, this appears to be
3667 the same disk with its HPA reapplied */
3668 if (ata_ignore_hpa && dev->n_sectors_boot == new_n_sectors
3669 && ata_id_hpa_enabled(new_id))
3670 return 1;
623a3128
TH
3671 return 0;
3672 }
3673
3674 return 1;
3675}
3676
3677/**
3678 * ata_dev_revalidate - Revalidate ATA device
623a3128 3679 * @dev: device to revalidate
bff04647 3680 * @readid_flags: read ID flags
623a3128
TH
3681 *
3682 * Re-read IDENTIFY page and make sure @dev is still attached to
3683 * the port.
3684 *
3685 * LOCKING:
3686 * Kernel thread context (may sleep)
3687 *
3688 * RETURNS:
3689 * 0 on success, negative errno otherwise
3690 */
bff04647 3691int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3692{
5eb45c02 3693 unsigned int class = dev->class;
f15a1daf 3694 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3695 int rc;
3696
5eb45c02
TH
3697 if (!ata_dev_enabled(dev)) {
3698 rc = -ENODEV;
3699 goto fail;
3700 }
623a3128 3701
fe635c7e 3702 /* read ID data */
bff04647 3703 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3704 if (rc)
3705 goto fail;
3706
3707 /* is the device still there? */
3373efd8 3708 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3709 rc = -ENODEV;
3710 goto fail;
3711 }
3712
fe635c7e 3713 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3714
3715 /* configure device according to the new ID */
efdaedc4 3716 rc = ata_dev_configure(dev);
5eb45c02
TH
3717 if (rc == 0)
3718 return 0;
623a3128
TH
3719
3720 fail:
f15a1daf 3721 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3722 return rc;
3723}
3724
6919a0a6
AC
3725struct ata_blacklist_entry {
3726 const char *model_num;
3727 const char *model_rev;
3728 unsigned long horkage;
3729};
3730
3731static const struct ata_blacklist_entry ata_device_blacklist [] = {
3732 /* Devices with DMA related problems under Linux */
3733 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3734 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3735 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3736 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3737 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3738 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3739 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3740 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3741 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3742 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3743 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3744 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3745 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3746 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3747 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3748 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3749 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3750 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3751 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3752 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3753 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3754 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3755 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3756 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3757 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3758 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3759 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3760 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3761 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3762
18d6e9d5 3763 /* Weird ATAPI devices */
6f23a31d
AL
3764 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
3765 ATA_HORKAGE_DMA_RW_ONLY },
18d6e9d5 3766
6919a0a6
AC
3767 /* Devices we expect to fail diagnostics */
3768
3769 /* Devices where NCQ should be avoided */
3770 /* NCQ is slow */
3771 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3772 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3773 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30
PR
3774 /* NCQ is broken */
3775 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
96442925
JA
3776 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3777 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
36e337d0
RH
3778 /* Blacklist entries taken from Silicon Image 3124/3132
3779 Windows driver .inf file - also several Linux problem reports */
3780 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3781 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3782 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6
AC
3783
3784 /* Devices with NCQ limits */
3785
3786 /* End Marker */
3787 { }
1da177e4 3788};
2e9edbf8 3789
6919a0a6 3790unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3791{
8bfa79fc
TH
3792 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3793 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3794 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3795
8bfa79fc
TH
3796 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3797 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3798
6919a0a6 3799 while (ad->model_num) {
8bfa79fc 3800 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3801 if (ad->model_rev == NULL)
3802 return ad->horkage;
8bfa79fc 3803 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3804 return ad->horkage;
f4b15fef 3805 }
6919a0a6 3806 ad++;
f4b15fef 3807 }
1da177e4
LT
3808 return 0;
3809}
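/*
 * Illustrative sketch: callers AND the returned horkage mask with the
 * flag they care about.  my_dev_wants_ncq() is hypothetical; the same
 * pattern is what the device-configuration path uses to turn off NCQ
 * for drives on the NONCQ list above.
 */
static int my_dev_wants_ncq(struct ata_device *dev)
{
	if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ)
		return 0;	/* known-bad drive, keep NCQ off */

	return ata_id_has_ncq(dev->id) != 0;
}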
3810
6919a0a6
AC
3811static int ata_dma_blacklisted(const struct ata_device *dev)
3812{
3813 /* We don't support polling DMA.
3814 * Blacklist ATAPI devices with CDB-intr for DMA (falling back to PIO)
3815 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3816 */
3817 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3818 (dev->flags & ATA_DFLAG_CDB_INTR))
3819 return 1;
3820 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3821}
3822
a6d5a51c
TH
3823/**
3824 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3825 * @dev: Device to compute xfermask for
3826 *
acf356b1
TH
3827 * Compute supported xfermask of @dev and store it in
3828 * dev->*_mask. This function is responsible for applying all
3829 * known limits including host controller limits, device
3830 * blacklist, etc...
a6d5a51c
TH
3831 *
3832 * LOCKING:
3833 * None.
a6d5a51c 3834 */
3373efd8 3835static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3836{
3373efd8 3837 struct ata_port *ap = dev->ap;
cca3974e 3838 struct ata_host *host = ap->host;
a6d5a51c 3839 unsigned long xfer_mask;
1da177e4 3840
37deecb5 3841 /* controller modes available */
565083e1
TH
3842 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3843 ap->mwdma_mask, ap->udma_mask);
3844
8343f889 3845 /* drive modes available */
37deecb5
TH
3846 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3847 dev->mwdma_mask, dev->udma_mask);
3848 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3849
b352e57d
AC
3850 /*
3851 * CFA Advanced TrueIDE timings are not allowed on a shared
3852 * cable
3853 */
3854 if (ata_dev_pair(dev)) {
3855 /* No PIO5 or PIO6 */
3856 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3857 /* No MWDMA3 or MWDMA 4 */
3858 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3859 }
3860
37deecb5
TH
3861 if (ata_dma_blacklisted(dev)) {
3862 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3863 ata_dev_printk(dev, KERN_WARNING,
3864 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3865 }
a6d5a51c 3866
14d66ab7
PV
3867 if ((host->flags & ATA_HOST_SIMPLEX) &&
3868 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3869 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3870 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3871 "other device, disabling DMA\n");
5444a6f4 3872 }
565083e1 3873
e424675f
JG
3874 if (ap->flags & ATA_FLAG_NO_IORDY)
3875 xfer_mask &= ata_pio_mask_no_iordy(dev);
3876
5444a6f4 3877 if (ap->ops->mode_filter)
a76b62ca 3878 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 3879
8343f889
RH
3880 /* Apply cable rule here. Don't apply it early because when
3881 * we handle hot plug the cable type can itself change.
3882 * Check this last so that we know if the transfer rate was
3883 * solely limited by the cable.
3885 * Unknown or 80-wire cables reported host side are checked
3886 * drive side as well. Cases where we know a 40-wire cable
3887 * is safely used for 80-wire transfers are not checked here.
3887 */
3888 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3889 /* UDMA/44 or higher would be available */
3890 if ((ap->cbl == ATA_CBL_PATA40) ||
3891 (ata_drive_40wire(dev->id) &&
3892 (ap->cbl == ATA_CBL_PATA_UNK ||
3893 ap->cbl == ATA_CBL_PATA80))) {
3894 ata_dev_printk(dev, KERN_WARNING,
3895 "limited to UDMA/33 due to 40-wire cable\n");
3896 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3897 }
3898
565083e1
TH
3899 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3900 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3901}
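/*
 * Illustrative sketch of the ->mode_filter hook invoked above.  The
 * hook receives the packed mask and may only clear bits.  The errata
 * here is made up: it caps a hypothetical controller at UDMA/66 by
 * clearing UDMA bits 5-6 (UDMA/100 and UDMA/133).
 */
static unsigned long my_mode_filter(struct ata_device *dev,
				    unsigned long xfer_mask)
{
	return xfer_mask & ~(0x60UL << ATA_SHIFT_UDMA);
}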
3902
1da177e4
LT
3903/**
3904 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3905 * @dev: Device to which command will be sent
3906 *
780a87f7
JG
3907 * Issue SET FEATURES - XFER MODE command to device @dev
3908 * on port @ap.
3909 *
1da177e4 3910 * LOCKING:
0cba632b 3911 * PCI/etc. bus probe sem.
83206a29
TH
3912 *
3913 * RETURNS:
3914 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3915 */
3916
3373efd8 3917static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3918{
a0123703 3919 struct ata_taskfile tf;
83206a29 3920 unsigned int err_mask;
1da177e4
LT
3921
3922 /* set up set-features taskfile */
3923 DPRINTK("set features - xfer mode\n");
3924
3373efd8 3925 ata_tf_init(dev, &tf);
a0123703
TH
3926 tf.command = ATA_CMD_SET_FEATURES;
3927 tf.feature = SETFEATURES_XFER;
3928 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3929 tf.protocol = ATA_PROT_NODATA;
3930 tf.nsect = dev->xfer_mode;
1da177e4 3931
3373efd8 3932 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3933
83206a29
TH
3934 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3935 return err_mask;
1da177e4
LT
3936}
3937
8bf62ece
AL
3938/**
3939 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3940 * @dev: Device to which command will be sent
e2a7f77a
RD
3941 * @heads: Number of heads (taskfile parameter)
3942 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3943 *
3944 * LOCKING:
6aff8f1f
TH
3945 * Kernel thread context (may sleep)
3946 *
3947 * RETURNS:
3948 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3949 */
3373efd8
TH
3950static unsigned int ata_dev_init_params(struct ata_device *dev,
3951 u16 heads, u16 sectors)
8bf62ece 3952{
a0123703 3953 struct ata_taskfile tf;
6aff8f1f 3954 unsigned int err_mask;
8bf62ece
AL
3955
3956 /* Number of sectors per track 1-255. Number of heads 1-16 */
3957 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3958 return AC_ERR_INVALID;
8bf62ece
AL
3959
3960 /* set up init dev params taskfile */
3961 DPRINTK("init dev params \n");
3962
3373efd8 3963 ata_tf_init(dev, &tf);
a0123703
TH
3964 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3965 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3966 tf.protocol = ATA_PROT_NODATA;
3967 tf.nsect = sectors;
3968 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3969
3373efd8 3970 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3971
6aff8f1f
TH
3972 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3973 return err_mask;
8bf62ece
AL
3974}
3975
1da177e4 3976/**
0cba632b
JG
3977 * ata_sg_clean - Unmap DMA memory associated with command
3978 * @qc: Command containing DMA memory to be released
3979 *
3980 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3981 *
3982 * LOCKING:
cca3974e 3983 * spin_lock_irqsave(host lock)
1da177e4 3984 */
70e6ad0c 3985void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3986{
3987 struct ata_port *ap = qc->ap;
cedc9a47 3988 struct scatterlist *sg = qc->__sg;
1da177e4 3989 int dir = qc->dma_dir;
cedc9a47 3990 void *pad_buf = NULL;
1da177e4 3991
a4631474
TH
3992 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3993 WARN_ON(sg == NULL);
1da177e4
LT
3994
3995 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3996 WARN_ON(qc->n_elem > 1);
1da177e4 3997
2c13b7ce 3998 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3999
cedc9a47
JG
4000 /* if we padded the buffer out to a 32-bit boundary, and data
4001 * xfer direction is from-device, we must copy from the
4002 * pad buffer back into the supplied buffer
4003 */
4004 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4005 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4006
4007 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4008 if (qc->n_elem)
2f1f610b 4009 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
4010 /* restore last sg */
4011 sg[qc->orig_n_elem - 1].length += qc->pad_len;
4012 if (pad_buf) {
4013 struct scatterlist *psg = &qc->pad_sgent;
4014 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4015 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4016 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4017 }
4018 } else {
2e242fa9 4019 if (qc->n_elem)
2f1f610b 4020 dma_unmap_single(ap->dev,
e1410f2d
JG
4021 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4022 dir);
cedc9a47
JG
4023 /* restore sg */
4024 sg->length += qc->pad_len;
4025 if (pad_buf)
4026 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4027 pad_buf, qc->pad_len);
4028 }
1da177e4
LT
4029
4030 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4031 qc->__sg = NULL;
1da177e4
LT
4032}
4033
4034/**
4035 * ata_fill_sg - Fill PCI IDE PRD table
4036 * @qc: Metadata associated with taskfile to be transferred
4037 *
780a87f7
JG
4038 * Fill PCI IDE PRD (scatter-gather) table with segments
4039 * associated with the current disk command.
4040 *
1da177e4 4041 * LOCKING:
cca3974e 4042 * spin_lock_irqsave(host lock)
1da177e4
LT
4043 *
4044 */
4045static void ata_fill_sg(struct ata_queued_cmd *qc)
4046{
1da177e4 4047 struct ata_port *ap = qc->ap;
cedc9a47
JG
4048 struct scatterlist *sg;
4049 unsigned int idx;
1da177e4 4050
a4631474 4051 WARN_ON(qc->__sg == NULL);
f131883e 4052 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4053
4054 idx = 0;
cedc9a47 4055 ata_for_each_sg(sg, qc) {
1da177e4
LT
4056 u32 addr, offset;
4057 u32 sg_len, len;
4058
4059 /* determine if physical DMA addr spans 64K boundary.
4060 * Note h/w doesn't support 64-bit, so we unconditionally
4061 * truncate dma_addr_t to u32.
4062 */
4063 addr = (u32) sg_dma_address(sg);
4064 sg_len = sg_dma_len(sg);
4065
4066 while (sg_len) {
4067 offset = addr & 0xffff;
4068 len = sg_len;
4069 if ((offset + sg_len) > 0x10000)
4070 len = 0x10000 - offset;
4071
4072 ap->prd[idx].addr = cpu_to_le32(addr);
4073 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4074 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4075
4076 idx++;
4077 sg_len -= len;
4078 addr += len;
4079 }
4080 }
4081
4082 if (idx)
4083 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4084}
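/*
 * Worked example of the 64K split above (illustrative): an S/G
 * element with addr 0x0000f000 and sg_len 0x3000 crosses the 64K
 * line at 0x10000, so it is emitted as two PRD entries:
 *
 *	PRD[n]   = (0x0000f000, 0x1000)	   up to the boundary
 *	PRD[n+1] = (0x00010000, 0x2000)	   the remainder
 *
 * The final entry then gets ATA_PRD_EOT set.
 */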
4085/**
4086 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4087 * @qc: Metadata associated with taskfile to check
4088 *
780a87f7
JG
4089 * Allow low-level driver to filter ATA PACKET commands, returning
4090 * a status indicating whether or not it is OK to use DMA for the
4091 * supplied PACKET command.
4092 *
1da177e4 4093 * LOCKING:
cca3974e 4094 * spin_lock_irqsave(host lock)
0cba632b 4095 *
1da177e4
LT
4096 * RETURNS: 0 when ATAPI DMA can be used
4097 * nonzero otherwise
4098 */
4099int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4100{
4101 struct ata_port *ap = qc->ap;
4102 int rc = 0; /* Assume ATAPI DMA is OK by default */
4103
6f23a31d
AL
4104 /* some drives can only do ATAPI DMA on read/write */
4105 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
4106 struct scsi_cmnd *cmd = qc->scsicmd;
4107 u8 *scsicmd = cmd->cmnd;
4108
4109 switch (scsicmd[0]) {
4110 case READ_10:
4111 case WRITE_10:
4112 case READ_12:
4113 case WRITE_12:
4114 case READ_6:
4115 case WRITE_6:
4116 /* ATAPI DMA may be OK */
4117 break;
4118 default:
4119 /* turn off atapi dma */
4120 return 1;
4121 }
4122 }
4123
1da177e4
LT
4124 if (ap->ops->check_atapi_dma)
4125 rc = ap->ops->check_atapi_dma(qc);
4126
4127 return rc;
4128}
4129/**
4130 * ata_qc_prep - Prepare taskfile for submission
4131 * @qc: Metadata associated with taskfile to be prepared
4132 *
780a87f7
JG
4133 * Prepare ATA taskfile for submission.
4134 *
1da177e4 4135 * LOCKING:
cca3974e 4136 * spin_lock_irqsave(host lock)
1da177e4
LT
4137 */
4138void ata_qc_prep(struct ata_queued_cmd *qc)
4139{
4140 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4141 return;
4142
4143 ata_fill_sg(qc);
4144}
4145
e46834cd
BK
4146void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4147
0cba632b
JG
4148/**
4149 * ata_sg_init_one - Associate command with memory buffer
4150 * @qc: Command to be associated
4151 * @buf: Memory buffer
4152 * @buflen: Length of memory buffer, in bytes.
4153 *
4154 * Initialize the data-related elements of queued_cmd @qc
4155 * to point to a single memory buffer, @buf of byte length @buflen.
4156 *
4157 * LOCKING:
cca3974e 4158 * spin_lock_irqsave(host lock)
0cba632b
JG
4159 */
4160
1da177e4
LT
4161void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4162{
1da177e4
LT
4163 qc->flags |= ATA_QCFLAG_SINGLE;
4164
cedc9a47 4165 qc->__sg = &qc->sgent;
1da177e4 4166 qc->n_elem = 1;
cedc9a47 4167 qc->orig_n_elem = 1;
1da177e4 4168 qc->buf_virt = buf;
233277ca 4169 qc->nbytes = buflen;
1da177e4 4170
61c0596c 4171 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4172}
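/*
 * Illustrative usage (fragment, assuming a prepared qc and a kernel
 * buffer buf): associate one 512-byte buffer with the command before
 * issue.
 *
 *	qc->dma_dir = DMA_FROM_DEVICE;
 *	ata_sg_init_one(qc, buf, ATA_SECT_SIZE);
 *
 * The command then carries exactly one S/G element covering buf.
 */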
4173
0cba632b
JG
4174/**
4175 * ata_sg_init - Associate command with scatter-gather table.
4176 * @qc: Command to be associated
4177 * @sg: Scatter-gather table.
4178 * @n_elem: Number of elements in s/g table.
4179 *
4180 * Initialize the data-related elements of queued_cmd @qc
4181 * to point to a scatter-gather table @sg, containing @n_elem
4182 * elements.
4183 *
4184 * LOCKING:
cca3974e 4185 * spin_lock_irqsave(host lock)
0cba632b
JG
4186 */
4187
1da177e4
LT
4188void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4189 unsigned int n_elem)
4190{
4191 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4192 qc->__sg = sg;
1da177e4 4193 qc->n_elem = n_elem;
cedc9a47 4194 qc->orig_n_elem = n_elem;
1da177e4
LT
4195}
4196
4197/**
0cba632b
JG
4198 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4199 * @qc: Command with memory buffer to be mapped.
4200 *
4201 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4202 *
4203 * LOCKING:
cca3974e 4204 * spin_lock_irqsave(host lock)
1da177e4
LT
4205 *
4206 * RETURNS:
0cba632b 4207 * Zero on success, negative on error.
1da177e4
LT
4208 */
4209
4210static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4211{
4212 struct ata_port *ap = qc->ap;
4213 int dir = qc->dma_dir;
cedc9a47 4214 struct scatterlist *sg = qc->__sg;
1da177e4 4215 dma_addr_t dma_address;
2e242fa9 4216 int trim_sg = 0;
1da177e4 4217
cedc9a47
JG
4218 /* we must lengthen transfers to end on a 32-bit boundary */
4219 qc->pad_len = sg->length & 3;
4220 if (qc->pad_len) {
4221 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4222 struct scatterlist *psg = &qc->pad_sgent;
4223
a4631474 4224 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4225
4226 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4227
4228 if (qc->tf.flags & ATA_TFLAG_WRITE)
4229 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4230 qc->pad_len);
4231
4232 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4233 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4234 /* trim sg */
4235 sg->length -= qc->pad_len;
2e242fa9
TH
4236 if (sg->length == 0)
4237 trim_sg = 1;
cedc9a47
JG
4238
4239 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4240 sg->length, qc->pad_len);
4241 }
4242
2e242fa9
TH
4243 if (trim_sg) {
4244 qc->n_elem--;
e1410f2d
JG
4245 goto skip_map;
4246 }
4247
2f1f610b 4248 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4249 sg->length, dir);
537a95d9
TH
4250 if (dma_mapping_error(dma_address)) {
4251 /* restore sg */
4252 sg->length += qc->pad_len;
1da177e4 4253 return -1;
537a95d9 4254 }
1da177e4
LT
4255
4256 sg_dma_address(sg) = dma_address;
32529e01 4257 sg_dma_len(sg) = sg->length;
1da177e4 4258
2e242fa9 4259skip_map:
1da177e4
LT
4260 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4261 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4262
4263 return 0;
4264}
4265
4266/**
0cba632b
JG
4267 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4268 * @qc: Command with scatter-gather table to be mapped.
4269 *
4270 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4271 *
4272 * LOCKING:
cca3974e 4273 * spin_lock_irqsave(host lock)
1da177e4
LT
4274 *
4275 * RETURNS:
0cba632b 4276 * Zero on success, negative on error.
1da177e4
LT
4277 *
4278 */
4279
4280static int ata_sg_setup(struct ata_queued_cmd *qc)
4281{
4282 struct ata_port *ap = qc->ap;
cedc9a47
JG
4283 struct scatterlist *sg = qc->__sg;
4284 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 4285 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4286
44877b4e 4287 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4288 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4289
cedc9a47
JG
4290 /* we must lengthen transfers to end on a 32-bit boundary */
4291 qc->pad_len = lsg->length & 3;
4292 if (qc->pad_len) {
4293 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4294 struct scatterlist *psg = &qc->pad_sgent;
4295 unsigned int offset;
4296
a4631474 4297 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4298
4299 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4300
4301 /*
4302 * psg->page/offset are used to copy to-be-written
4303 * data in this function or read data in ata_sg_clean.
4304 */
4305 offset = lsg->offset + lsg->length - qc->pad_len;
4306 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4307 psg->offset = offset_in_page(offset);
4308
4309 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4310 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4311 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4312 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4313 }
4314
4315 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4316 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4317 /* trim last sg */
4318 lsg->length -= qc->pad_len;
e1410f2d
JG
4319 if (lsg->length == 0)
4320 trim_sg = 1;
cedc9a47
JG
4321
4322 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4323 qc->n_elem - 1, lsg->length, qc->pad_len);
4324 }
4325
e1410f2d
JG
4326 pre_n_elem = qc->n_elem;
4327 if (trim_sg && pre_n_elem)
4328 pre_n_elem--;
4329
4330 if (!pre_n_elem) {
4331 n_elem = 0;
4332 goto skip_map;
4333 }
4334
1da177e4 4335 dir = qc->dma_dir;
2f1f610b 4336 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4337 if (n_elem < 1) {
4338 /* restore last sg */
4339 lsg->length += qc->pad_len;
1da177e4 4340 return -1;
537a95d9 4341 }
1da177e4
LT
4342
4343 DPRINTK("%d sg elements mapped\n", n_elem);
4344
e1410f2d 4345skip_map:
1da177e4
LT
4346 qc->n_elem = n_elem;
4347
4348 return 0;
4349}
4350
0baab86b 4351/**
c893a3ae 4352 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4353 * @buf: Buffer to swap
4354 * @buf_words: Number of 16-bit words in buffer.
4355 *
4356 * Swap halves of 16-bit words if needed to convert from
4357 * little-endian byte order to native cpu byte order, or
4358 * vice-versa.
4359 *
4360 * LOCKING:
6f0ef4fa 4361 * Inherited from caller.
0baab86b 4362 */
1da177e4
LT
4363void swap_buf_le16(u16 *buf, unsigned int buf_words)
4364{
4365#ifdef __BIG_ENDIAN
4366 unsigned int i;
4367
4368 for (i = 0; i < buf_words; i++)
4369 buf[i] = le16_to_cpu(buf[i]);
4370#endif /* __BIG_ENDIAN */
4371}
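/*
 * Illustrative usage: IDENTIFY data arrives as 256 little-endian
 * words, so after a raw PIO read it is converted to CPU byte order
 * (a no-op on little-endian machines):
 *
 *	u16 id[ATA_ID_WORDS];
 *	... PIO read fills id[] ...
 *	swap_buf_le16(id, ATA_ID_WORDS);
 */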
4372
6ae4cfb5 4373/**
0d5ff566 4374 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4375 * @adev: device to target
6ae4cfb5
AL
4376 * @buf: data buffer
4377 * @buflen: buffer length
344babaa 4378 * @write_data: 1 to write, 0 to read
6ae4cfb5
AL
4379 *
4380 * Transfer data from/to the device data register by PIO.
4381 *
4382 * LOCKING:
4383 * Inherited from caller.
6ae4cfb5 4384 */
0d5ff566
TH
4385void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4386 unsigned int buflen, int write_data)
1da177e4 4387{
a6b2c5d4 4388 struct ata_port *ap = adev->ap;
6ae4cfb5 4389 unsigned int words = buflen >> 1;
1da177e4 4390
6ae4cfb5 4391 /* Transfer multiple of 2 bytes */
1da177e4 4392 if (write_data)
0d5ff566 4393 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4394 else
0d5ff566 4395 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4396
4397 /* Transfer trailing 1 byte, if any. */
4398 if (unlikely(buflen & 0x01)) {
4399 u16 align_buf[1] = { 0 };
4400 unsigned char *trailing_buf = buf + buflen - 1;
4401
4402 if (write_data) {
4403 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4404 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4405 } else {
0d5ff566 4406 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4407 memcpy(trailing_buf, align_buf, 1);
4408 }
4409 }
1da177e4
LT
4410}
4411
75e99585 4412/**
0d5ff566 4413 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
4414 * @adev: device to target
4415 * @buf: data buffer
4416 * @buflen: buffer length
4417 * @write_data: 1 to write, 0 to read
4418 *
88574551 4419 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4420 * transfer with interrupts disabled.
4421 *
4422 * LOCKING:
4423 * Inherited from caller.
4424 */
0d5ff566
TH
4425void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4426 unsigned int buflen, int write_data)
75e99585
AC
4427{
4428 unsigned long flags;
4429 local_irq_save(flags);
0d5ff566 4430 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
4431 local_irq_restore(flags);
4432}
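/*
 * Illustrative sketch: an LLDD picks one of the two helpers above
 * when filling in its ata_port_operations; drivers whose DRQ
 * handshake cannot tolerate interruption use the _noirq variant.
 * The fragment below is hypothetical and elides the other hooks.
 */
static const struct ata_port_operations my_port_ops = {
	.data_xfer	= ata_data_xfer_noirq,
	/* ... remaining mandatory hooks elided ... */
};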
4433
4434
6ae4cfb5 4435/**
5a5dbd18 4436 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4437 * @qc: Command in progress
4438 *
5a5dbd18 4439 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4440 *
4441 * LOCKING:
4442 * Inherited from caller.
4443 */
4444
1da177e4
LT
4445static void ata_pio_sector(struct ata_queued_cmd *qc)
4446{
4447 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4448 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4449 struct ata_port *ap = qc->ap;
4450 struct page *page;
4451 unsigned int offset;
4452 unsigned char *buf;
4453
5a5dbd18 4454 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4455 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4456
4457 page = sg[qc->cursg].page;
726f0785 4458 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4459
4460 /* get the current page and offset */
4461 page = nth_page(page, (offset >> PAGE_SHIFT));
4462 offset %= PAGE_SIZE;
4463
1da177e4
LT
4464 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4465
91b8b313
AL
4466 if (PageHighMem(page)) {
4467 unsigned long flags;
4468
a6b2c5d4 4469 /* FIXME: use a bounce buffer */
91b8b313
AL
4470 local_irq_save(flags);
4471 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4472
91b8b313 4473 /* do the actual data transfer */
5a5dbd18 4474 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4475
91b8b313
AL
4476 kunmap_atomic(buf, KM_IRQ0);
4477 local_irq_restore(flags);
4478 } else {
4479 buf = page_address(page);
5a5dbd18 4480 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4481 }
1da177e4 4482
5a5dbd18
ML
4483 qc->curbytes += qc->sect_size;
4484 qc->cursg_ofs += qc->sect_size;
1da177e4 4485
726f0785 4486 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4487 qc->cursg++;
4488 qc->cursg_ofs = 0;
4489 }
1da177e4 4490}
1da177e4 4491
07f6f7d0 4492/**
5a5dbd18 4493 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4494 * @qc: Command in progress
4495 *
5a5dbd18 4496 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4497 * ATA device for the DRQ request.
4498 *
4499 * LOCKING:
4500 * Inherited from caller.
4501 */
1da177e4 4502
07f6f7d0
AL
4503static void ata_pio_sectors(struct ata_queued_cmd *qc)
4504{
4505 if (is_multi_taskfile(&qc->tf)) {
4506 /* READ/WRITE MULTIPLE */
4507 unsigned int nsect;
4508
587005de 4509 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4510
5a5dbd18 4511 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4512 qc->dev->multi_count);
07f6f7d0
AL
4513 while (nsect--)
4514 ata_pio_sector(qc);
4515 } else
4516 ata_pio_sector(qc);
4517}
4518
c71c1857
AL
4519/**
4520 * atapi_send_cdb - Write CDB bytes to hardware
4521 * @ap: Port to which ATAPI device is attached.
4522 * @qc: Taskfile currently active
4523 *
4524 * When the device has indicated its readiness to accept
4525 * a CDB, this function is called. Send the CDB.
4526 *
4527 * LOCKING:
4528 * caller.
4529 */
4530
4531static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4532{
4533 /* send SCSI cdb */
4534 DPRINTK("send cdb\n");
db024d53 4535 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4536
a6b2c5d4 4537 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4538 ata_altstatus(ap); /* flush */
4539
4540 switch (qc->tf.protocol) {
4541 case ATA_PROT_ATAPI:
4542 ap->hsm_task_state = HSM_ST;
4543 break;
4544 case ATA_PROT_ATAPI_NODATA:
4545 ap->hsm_task_state = HSM_ST_LAST;
4546 break;
4547 case ATA_PROT_ATAPI_DMA:
4548 ap->hsm_task_state = HSM_ST_LAST;
4549 /* initiate bmdma */
4550 ap->ops->bmdma_start(qc);
4551 break;
4552 }
1da177e4
LT
4553}
4554
6ae4cfb5
AL
4555/**
4556 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4557 * @qc: Command in progress
4558 * @bytes: number of bytes
4559 *
4560 * Transfer data from/to the ATAPI device.
4561 *
4562 * LOCKING:
4563 * Inherited from caller.
4564 *
4565 */
4566
1da177e4
LT
4567static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4568{
4569 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4570 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4571 struct ata_port *ap = qc->ap;
4572 struct page *page;
4573 unsigned char *buf;
4574 unsigned int offset, count;
4575
563a6e1f 4576 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4577 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4578
4579next_sg:
563a6e1f 4580 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4581 /*
563a6e1f
AL
4582 * The end of qc->sg is reached and the device expects
4583 * more data to transfer. In order not to overrun qc->sg
4584 * and fulfill length specified in the byte count register,
4585 * - for read case, discard trailing data from the device
4586 * - for write case, padding zero data to the device
4587 */
4588 u16 pad_buf[1] = { 0 };
4589 unsigned int words = bytes >> 1;
4590 unsigned int i;
4591
4592 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4593 ata_dev_printk(qc->dev, KERN_WARNING,
4594 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4595
4596 for (i = 0; i < words; i++)
a6b2c5d4 4597 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4598
14be71f4 4599 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4600 return;
4601 }
4602
cedc9a47 4603 sg = &qc->__sg[qc->cursg];
1da177e4 4604
1da177e4
LT
4605 page = sg->page;
4606 offset = sg->offset + qc->cursg_ofs;
4607
4608 /* get the current page and offset */
4609 page = nth_page(page, (offset >> PAGE_SHIFT));
4610 offset %= PAGE_SIZE;
4611
6952df03 4612 /* don't overrun current sg */
32529e01 4613 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4614
4615 /* don't cross page boundaries */
4616 count = min(count, (unsigned int)PAGE_SIZE - offset);
4617
7282aa4b
AL
4618 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4619
91b8b313
AL
4620 if (PageHighMem(page)) {
4621 unsigned long flags;
4622
a6b2c5d4 4623 /* FIXME: use bounce buffer */
91b8b313
AL
4624 local_irq_save(flags);
4625 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4626
91b8b313 4627 /* do the actual data transfer */
a6b2c5d4 4628 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4629
91b8b313
AL
4630 kunmap_atomic(buf, KM_IRQ0);
4631 local_irq_restore(flags);
4632 } else {
4633 buf = page_address(page);
a6b2c5d4 4634 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4635 }
1da177e4
LT
4636
4637 bytes -= count;
4638 qc->curbytes += count;
4639 qc->cursg_ofs += count;
4640
32529e01 4641 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4642 qc->cursg++;
4643 qc->cursg_ofs = 0;
4644 }
4645
563a6e1f 4646 if (bytes)
1da177e4 4647 goto next_sg;
1da177e4
LT
4648}
4649
6ae4cfb5
AL
4650/**
4651 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4652 * @qc: Command in progress
4653 *
4654 * Transfer data from/to the ATAPI device.
4655 *
4656 * LOCKING:
4657 * Inherited from caller.
6ae4cfb5
AL
4658 */
4659
1da177e4
LT
4660static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4661{
4662 struct ata_port *ap = qc->ap;
4663 struct ata_device *dev = qc->dev;
4664 unsigned int ireason, bc_lo, bc_hi, bytes;
4665 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4666
eec4c3f3
AL
4667 /* Abuse qc->result_tf for temp storage of intermediate TF
4668 * here to save some kernel stack usage.
4669 * For normal completion, qc->result_tf is not relevant. For
4670 * error, qc->result_tf is later overwritten by ata_qc_complete().
4671 * So, the correctness of qc->result_tf is not affected.
4672 */
4673 ap->ops->tf_read(ap, &qc->result_tf);
4674 ireason = qc->result_tf.nsect;
4675 bc_lo = qc->result_tf.lbam;
4676 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4677 bytes = (bc_hi << 8) | bc_lo;
4678
4679 /* shall be cleared to zero, indicating xfer of data */
4680 if (ireason & (1 << 0))
4681 goto err_out;
4682
4683 /* make sure transfer direction matches expected */
4684 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4685 if (do_write != i_write)
4686 goto err_out;
4687
44877b4e 4688 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4689
1da177e4
LT
4690 __atapi_pio_bytes(qc, bytes);
4691
4692 return;
4693
4694err_out:
f15a1daf 4695 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4696 qc->err_mask |= AC_ERR_HSM;
14be71f4 4697 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4698}
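/*
 * Illustrative note on the ireason checks above: in the ATAPI
 * interrupt reason register, bit 0 (CoD) set means a command packet
 * is expected rather than data, and bit 1 (IO) set means the transfer
 * is device-to-host.  A data phase therefore requires CoD == 0 and IO
 * consistent with the taskfile's direction, which is exactly what is
 * verified before __atapi_pio_bytes() runs.
 */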
4699
4700/**
c234fb00
AL
4701 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4702 * @ap: the target ata_port
4703 * @qc: qc in progress
1da177e4 4704 *
c234fb00
AL
4705 * RETURNS:
4706 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4707 */
c234fb00
AL
4708
4709static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4710{
c234fb00
AL
4711 if (qc->tf.flags & ATA_TFLAG_POLLING)
4712 return 1;
1da177e4 4713
c234fb00
AL
4714 if (ap->hsm_task_state == HSM_ST_FIRST) {
4715 if (qc->tf.protocol == ATA_PROT_PIO &&
4716 (qc->tf.flags & ATA_TFLAG_WRITE))
4717 return 1;
1da177e4 4718
c234fb00
AL
4719 if (is_atapi_taskfile(&qc->tf) &&
4720 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4721 return 1;
fe79e683
AL
4722 }
4723
c234fb00
AL
4724 return 0;
4725}
1da177e4 4726
c17ea20d
TH
4727/**
4728 * ata_hsm_qc_complete - finish a qc running on standard HSM
4729 * @qc: Command to complete
4730 * @in_wq: 1 if called from workqueue, 0 otherwise
4731 *
4732 * Finish @qc which is running on standard HSM.
4733 *
4734 * LOCKING:
cca3974e 4735 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4736 * Otherwise, none on entry and grabs host lock.
4737 */
4738static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4739{
4740 struct ata_port *ap = qc->ap;
4741 unsigned long flags;
4742
4743 if (ap->ops->error_handler) {
4744 if (in_wq) {
ba6a1308 4745 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4746
cca3974e
JG
4747 /* EH might have kicked in while host lock is
4748 * released.
c17ea20d
TH
4749 */
4750 qc = ata_qc_from_tag(ap, qc->tag);
4751 if (qc) {
4752 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4753 ap->ops->irq_on(ap);
c17ea20d
TH
4754 ata_qc_complete(qc);
4755 } else
4756 ata_port_freeze(ap);
4757 }
4758
ba6a1308 4759 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4760 } else {
4761 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4762 ata_qc_complete(qc);
4763 else
4764 ata_port_freeze(ap);
4765 }
4766 } else {
4767 if (in_wq) {
ba6a1308 4768 spin_lock_irqsave(ap->lock, flags);
83625006 4769 ap->ops->irq_on(ap);
c17ea20d 4770 ata_qc_complete(qc);
ba6a1308 4771 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4772 } else
4773 ata_qc_complete(qc);
4774 }
1da177e4 4775
c81e29b4 4776 ata_altstatus(ap); /* flush */
c17ea20d
TH
4777}
4778
bb5cb290
AL
4779/**
4780 * ata_hsm_move - move the HSM to the next state.
4781 * @ap: the target ata_port
4782 * @qc: qc in progress
4783 * @status: current device status
4784 * @in_wq: 1 if called from workqueue, 0 otherwise
4785 *
4786 * RETURNS:
4787 * 1 when poll next status needed, 0 otherwise.
4788 */
9a1004d0
TH
4789int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4790 u8 status, int in_wq)
e2cec771 4791{
bb5cb290
AL
4792 unsigned long flags = 0;
4793 int poll_next;
4794
6912ccd5
AL
4795 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4796
bb5cb290
AL
4797 /* Make sure ata_qc_issue_prot() does not throw things
4798 * like DMA polling into the workqueue. Notice that
4799 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4800 */
c234fb00 4801 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4802
e2cec771 4803fsm_start:
999bb6f4 4804 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4805 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4806
e2cec771
AL
4807 switch (ap->hsm_task_state) {
4808 case HSM_ST_FIRST:
bb5cb290
AL
4809 /* Send first data block or PACKET CDB */
4810
4811 /* If polling, we will stay in the work queue after
4812 * sending the data. Otherwise, interrupt handler
4813 * takes over after sending the data.
4814 */
4815 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4816
e2cec771 4817 /* check device status */
3655d1d3
AL
4818 if (unlikely((status & ATA_DRQ) == 0)) {
4819 /* handle BSY=0, DRQ=0 as error */
4820 if (likely(status & (ATA_ERR | ATA_DF)))
4821 /* device stops HSM for abort/error */
4822 qc->err_mask |= AC_ERR_DEV;
4823 else
4824 /* HSM violation. Let EH handle this */
4825 qc->err_mask |= AC_ERR_HSM;
4826
14be71f4 4827 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4828 goto fsm_start;
1da177e4
LT
4829 }
4830
71601958
AL
4831 /* Device should not ask for data transfer (DRQ=1)
4832 * when it finds something wrong.
eee6c32f
AL
4833 * We ignore DRQ here and stop the HSM by
4834 * changing hsm_task_state to HSM_ST_ERR and
4835 * let the EH abort the command or reset the device.
71601958
AL
4836 */
4837 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4838 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4839 "error, dev_stat 0x%X\n", status);
3655d1d3 4840 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4841 ap->hsm_task_state = HSM_ST_ERR;
4842 goto fsm_start;
71601958 4843 }
1da177e4 4844
bb5cb290
AL
4845 /* Send the CDB (atapi) or the first data block (ata pio out).
4846 * During the state transition, interrupt handler shouldn't
4847 * be invoked before the data transfer is complete and
4848 * hsm_task_state is changed. Hence, the following locking.
4849 */
4850 if (in_wq)
ba6a1308 4851 spin_lock_irqsave(ap->lock, flags);
1da177e4 4852
bb5cb290
AL
4853 if (qc->tf.protocol == ATA_PROT_PIO) {
4854 /* PIO data out protocol.
4855 * send first data block.
4856 */
0565c26d 4857
bb5cb290
AL
4858 /* ata_pio_sectors() might change the state
4859 * to HSM_ST_LAST. so, the state is changed here
4860 * before ata_pio_sectors().
4861 */
4862 ap->hsm_task_state = HSM_ST;
4863 ata_pio_sectors(qc);
4864 ata_altstatus(ap); /* flush */
4865 } else
4866 /* send CDB */
4867 atapi_send_cdb(ap, qc);
4868
4869 if (in_wq)
ba6a1308 4870 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4871
4872 /* if polling, ata_pio_task() handles the rest.
4873 * otherwise, interrupt handler takes over from here.
4874 */
e2cec771 4875 break;
1c848984 4876
e2cec771
AL
4877 case HSM_ST:
4878 /* complete command or read/write the data register */
4879 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4880 /* ATAPI PIO protocol */
4881 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4882 /* No more data to transfer or device error.
4883 * Device error will be tagged in HSM_ST_LAST.
4884 */
e2cec771
AL
4885 ap->hsm_task_state = HSM_ST_LAST;
4886 goto fsm_start;
4887 }
1da177e4 4888
71601958
AL
4889 /* Device should not ask for data transfer (DRQ=1)
4890 * when it finds something wrong.
eee6c32f
AL
4891 * We ignore DRQ here and stop the HSM by
4892 * changing hsm_task_state to HSM_ST_ERR and
4893 * let the EH abort the command or reset the device.
71601958
AL
4894 */
4895 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4896 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4897 "device error, dev_stat 0x%X\n",
4898 status);
3655d1d3 4899 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4900 ap->hsm_task_state = HSM_ST_ERR;
4901 goto fsm_start;
71601958 4902 }
1da177e4 4903
e2cec771 4904 atapi_pio_bytes(qc);
7fb6ec28 4905
e2cec771
AL
4906 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4907 /* bad ireason reported by device */
4908 goto fsm_start;
1da177e4 4909
e2cec771
AL
4910 } else {
4911 /* ATA PIO protocol */
4912 if (unlikely((status & ATA_DRQ) == 0)) {
4913 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4914 if (likely(status & (ATA_ERR | ATA_DF)))
4915 /* device stops HSM for abort/error */
4916 qc->err_mask |= AC_ERR_DEV;
4917 else
55a8e2c8
TH
4918 /* HSM violation. Let EH handle this.
4919 * Phantom devices also trigger this
4920 * condition. Mark hint.
4921 */
4922 qc->err_mask |= AC_ERR_HSM |
4923 AC_ERR_NODEV_HINT;
3655d1d3 4924
e2cec771
AL
4925 ap->hsm_task_state = HSM_ST_ERR;
4926 goto fsm_start;
4927 }
1da177e4 4928
eee6c32f
AL
4929 /* For PIO reads, some devices may ask for
4930 * data transfer (DRQ=1) along with ERR=1.
4931 * We respect DRQ here and transfer one
4932 * block of junk data before changing the
4933 * hsm_task_state to HSM_ST_ERR.
4934 *
4935 * For PIO writes, ERR=1 DRQ=1 doesn't make
4936 * sense since the data block has been
4937 * transferred to the device.
71601958
AL
4938 */
4939 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4940 /* data might be corrupted */
4941 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4942
4943 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4944 ata_pio_sectors(qc);
4945 ata_altstatus(ap);
4946 status = ata_wait_idle(ap);
4947 }
4948
3655d1d3
AL
4949 if (status & (ATA_BUSY | ATA_DRQ))
4950 qc->err_mask |= AC_ERR_HSM;
4951
eee6c32f
AL
4952 /* ata_pio_sectors() might change the
4953 * state to HSM_ST_LAST. so, the state
4954 * is changed after ata_pio_sectors().
4955 */
4956 ap->hsm_task_state = HSM_ST_ERR;
4957 goto fsm_start;
71601958
AL
4958 }
4959
e2cec771
AL
4960 ata_pio_sectors(qc);
4961
4962 if (ap->hsm_task_state == HSM_ST_LAST &&
4963 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4964 /* all data read */
4965 ata_altstatus(ap);
52a32205 4966 status = ata_wait_idle(ap);
e2cec771
AL
4967 goto fsm_start;
4968 }
4969 }
4970
4971 ata_altstatus(ap); /* flush */
bb5cb290 4972 poll_next = 1;
1da177e4
LT
4973 break;
4974
14be71f4 4975 case HSM_ST_LAST:
6912ccd5
AL
4976 if (unlikely(!ata_ok(status))) {
4977 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4978 ap->hsm_task_state = HSM_ST_ERR;
4979 goto fsm_start;
4980 }
4981
4982 /* no more data to transfer */
4332a771 4983 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 4984 ap->print_id, qc->dev->devno, status);
e2cec771 4985
6912ccd5
AL
4986 WARN_ON(qc->err_mask);
4987
e2cec771 4988 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4989
e2cec771 4990 /* complete taskfile transaction */
c17ea20d 4991 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4992
4993 poll_next = 0;
1da177e4
LT
4994 break;
4995
14be71f4 4996 case HSM_ST_ERR:
e2cec771
AL
4997 /* make sure qc->err_mask is available to
4998 * know what's wrong and recover
4999 */
5000 WARN_ON(qc->err_mask == 0);
5001
5002 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 5003
999bb6f4 5004 /* complete taskfile transaction */
c17ea20d 5005 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5006
5007 poll_next = 0;
e2cec771
AL
5008 break;
5009 default:
bb5cb290 5010 poll_next = 0;
6912ccd5 5011 BUG();
1da177e4
LT
5012 }
5013
bb5cb290 5014 return poll_next;
1da177e4
LT
5015}
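/*
 * Illustrative sketch: how an interrupt handler drives the HSM.  This
 * condenses the pattern used by the libata interrupt path; the
 * HSM_ST_IDLE and polling short-circuits are elided here.
 */
static inline void my_port_intr(struct ata_port *ap,
				struct ata_queued_cmd *qc)
{
	u8 status = ata_chk_status(ap);	/* reading Status also acks INTRQ */

	/* in_wq == 0: interrupt context, not the PIO workqueue */
	ata_hsm_move(ap, qc, status, 0);
}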
5016
65f27f38 5017static void ata_pio_task(struct work_struct *work)
8061f5f0 5018{
65f27f38
DH
5019 struct ata_port *ap =
5020 container_of(work, struct ata_port, port_task.work);
5021 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5022 u8 status;
a1af3734 5023 int poll_next;
8061f5f0 5024
7fb6ec28 5025fsm_start:
a1af3734 5026 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5027
a1af3734
AL
5028 /*
5029 * This is purely heuristic. This is a fast path.
5030 * Sometimes when we enter, BSY will be cleared in
5031 * a chk-status or two. If not, the drive is probably seeking
5032 * or something. Snooze for a couple msecs, then
5033 * chk-status again. If still busy, queue delayed work.
5034 */
5035 status = ata_busy_wait(ap, ATA_BUSY, 5);
5036 if (status & ATA_BUSY) {
5037 msleep(2);
5038 status = ata_busy_wait(ap, ATA_BUSY, 10);
5039 if (status & ATA_BUSY) {
31ce6dae 5040 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5041 return;
5042 }
8061f5f0
TH
5043 }
5044
a1af3734
AL
5045 /* move the HSM */
5046 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5047
a1af3734
AL
5048 /* another command or interrupt handler
5049 * may be running at this point.
5050 */
5051 if (poll_next)
7fb6ec28 5052 goto fsm_start;
8061f5f0
TH
5053}
5054
1da177e4
LT
5055/**
5056 * ata_qc_new - Request an available ATA command, for queueing
5057 * @ap: target port
5059 *
5060 * LOCKING:
0cba632b 5061 * None.
1da177e4
LT
5062 */
5063
5064static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5065{
5066 struct ata_queued_cmd *qc = NULL;
5067 unsigned int i;
5068
e3180499 5069 /* no command while frozen */
b51e9e5d 5070 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5071 return NULL;
5072
2ab7db1f
TH
5073 /* the last tag is reserved for internal command. */
5074 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5075 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5076 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5077 break;
5078 }
5079
5080 if (qc)
5081 qc->tag = i;
5082
5083 return qc;
5084}
5085
5086/**
5087 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5088 * @dev: Device from which we request an available command structure
5089 *
5090 * LOCKING:
0cba632b 5091 * None.
1da177e4
LT
5092 */
5093
3373efd8 5094struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5095{
3373efd8 5096 struct ata_port *ap = dev->ap;
1da177e4
LT
5097 struct ata_queued_cmd *qc;
5098
5099 qc = ata_qc_new(ap);
5100 if (qc) {
1da177e4
LT
5101 qc->scsicmd = NULL;
5102 qc->ap = ap;
5103 qc->dev = dev;
1da177e4 5104
2c13b7ce 5105 ata_qc_reinit(qc);
1da177e4
LT
5106 }
5107
5108 return qc;
5109}
5110
1da177e4
LT
5111/**
5112 * ata_qc_free - free unused ata_queued_cmd
5113 * @qc: Command to complete
5114 *
5115 * Designed to free unused ata_queued_cmd object
5116 * in case something prevents using it.
5117 *
5118 * LOCKING:
cca3974e 5119 * spin_lock_irqsave(host lock)
1da177e4
LT
5120 */
5121void ata_qc_free(struct ata_queued_cmd *qc)
5122{
4ba946e9
TH
5123 struct ata_port *ap = qc->ap;
5124 unsigned int tag;
5125
a4631474 5126 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5127
4ba946e9
TH
5128 qc->flags = 0;
5129 tag = qc->tag;
5130 if (likely(ata_tag_valid(tag))) {
4ba946e9 5131 qc->tag = ATA_TAG_POISON;
6cec4a39 5132 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5133 }
1da177e4
LT
5134}
5135
76014427 5136void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5137{
dedaf2b0
TH
5138 struct ata_port *ap = qc->ap;
5139
a4631474
TH
5140 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5141 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5142
5143 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5144 ata_sg_clean(qc);
5145
7401abf2 5146 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
5147 if (qc->tf.protocol == ATA_PROT_NCQ)
5148 ap->sactive &= ~(1 << qc->tag);
5149 else
5150 ap->active_tag = ATA_TAG_POISON;
7401abf2 5151
3f3791d3
AL
5152 /* atapi: mark qc as inactive to prevent the interrupt handler
5153 * from completing the command twice later, before the error handler
5154 * is called. (when rc != 0 and atapi request sense is needed)
5155 */
5156 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5157 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5158
1da177e4 5159 /* call completion callback */
77853bf2 5160 qc->complete_fn(qc);
1da177e4
LT
5161}
5162
39599a53
TH
5163static void fill_result_tf(struct ata_queued_cmd *qc)
5164{
5165 struct ata_port *ap = qc->ap;
5166
39599a53 5167 qc->result_tf.flags = qc->tf.flags;
4742d54f 5168 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5169}
5170
f686bcb8
TH
5171/**
5172 * ata_qc_complete - Complete an active ATA command
5173 * @qc: Command to complete
5175 *
5176 * Indicate to the mid and upper layers that an ATA
5177 * command has completed, with either an ok or not-ok status.
5178 *
5179 * LOCKING:
cca3974e 5180 * spin_lock_irqsave(host lock)
f686bcb8
TH
5181 */
5182void ata_qc_complete(struct ata_queued_cmd *qc)
5183{
5184 struct ata_port *ap = qc->ap;
5185
5186 /* XXX: New EH and old EH use different mechanisms to
5187 * synchronize EH with regular execution path.
5188 *
5189 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5190 * Normal execution path is responsible for not accessing a
5191 * failed qc. libata core enforces the rule by returning NULL
5192 * from ata_qc_from_tag() for failed qcs.
5193 *
5194 * Old EH depends on ata_qc_complete() nullifying completion
5195 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5196 * not synchronize with interrupt handler. Only PIO task is
5197 * taken care of.
5198 */
5199 if (ap->ops->error_handler) {
b51e9e5d 5200 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5201
5202 if (unlikely(qc->err_mask))
5203 qc->flags |= ATA_QCFLAG_FAILED;
5204
5205 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5206 if (!ata_tag_internal(qc->tag)) {
5207 /* always fill result TF for failed qc */
39599a53 5208 fill_result_tf(qc);
f686bcb8
TH
5209 ata_qc_schedule_eh(qc);
5210 return;
5211 }
5212 }
5213
5214 /* read result TF if requested */
5215 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5216 fill_result_tf(qc);
f686bcb8
TH
5217
5218 __ata_qc_complete(qc);
5219 } else {
5220 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5221 return;
5222
5223 /* read result TF if failed or requested */
5224 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5225 fill_result_tf(qc);
f686bcb8
TH
5226
5227 __ata_qc_complete(qc);
5228 }
5229}
5230
dedaf2b0
TH
5231/**
5232 * ata_qc_complete_multiple - Complete multiple qcs successfully
5233 * @ap: port in question
5234 * @qc_active: new qc_active mask
5235 * @finish_qc: LLDD callback invoked before completing a qc
5236 *
5237 * Complete in-flight commands. This function is meant to be
5238 * called from a low-level driver's interrupt routine to complete
5239 * requests normally. ap->qc_active and @qc_active are compared
5240 * and commands are completed accordingly.
5241 *
5242 * LOCKING:
cca3974e 5243 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5244 *
5245 * RETURNS:
5246 * Number of completed commands on success, -errno otherwise.
5247 */
5248int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5249 void (*finish_qc)(struct ata_queued_cmd *))
5250{
5251 int nr_done = 0;
5252 u32 done_mask;
5253 int i;
5254
5255 done_mask = ap->qc_active ^ qc_active;
5256
5257 if (unlikely(done_mask & qc_active)) {
5258 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5259 "(%08x->%08x)\n", ap->qc_active, qc_active);
5260 return -EINVAL;
5261 }
5262
5263 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5264 struct ata_queued_cmd *qc;
5265
5266 if (!(done_mask & (1 << i)))
5267 continue;
5268
5269 if ((qc = ata_qc_from_tag(ap, i))) {
5270 if (finish_qc)
5271 finish_qc(qc);
5272 ata_qc_complete(qc);
5273 nr_done++;
5274 }
5275 }
5276
5277 return nr_done;
5278}
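/*
 * Illustrative sketch: an NCQ-capable LLDD completing finished
 * commands from its interrupt handler.  my_read_active_tags() is a
 * hypothetical stand-in for reading the controller's register of
 * tags still in flight.
 */
static u32 my_read_active_tags(struct ata_port *ap)
{
	return ap->qc_active;	/* stub: pretend nothing completed */
}

static void my_ncq_interrupt(struct ata_port *ap)
{
	u32 qc_active = my_read_active_tags(ap);

	/* completes every tag that was active but no longer is */
	ata_qc_complete_multiple(ap, qc_active, NULL);
}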
5279
1da177e4
LT
5280static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5281{
5282 struct ata_port *ap = qc->ap;
5283
5284 switch (qc->tf.protocol) {
3dc1d881 5285 case ATA_PROT_NCQ:
1da177e4
LT
5286 case ATA_PROT_DMA:
5287 case ATA_PROT_ATAPI_DMA:
5288 return 1;
5289
5290 case ATA_PROT_ATAPI:
5291 case ATA_PROT_PIO:
1da177e4
LT
5292 if (ap->flags & ATA_FLAG_PIO_DMA)
5293 return 1;
5294
5295 /* fall through */
5296
5297 default:
5298 return 0;
5299 }
5300
5301 /* never reached */
5302}
5303
5304/**
5305 * ata_qc_issue - issue taskfile to device
5306 * @qc: command to issue to device
5307 *
5308 * Prepare an ATA command for submission to the device.
5309 * This includes mapping the data into a DMA-able
5310 * area, filling in the S/G table, and finally
5311 * writing the taskfile to hardware, starting the command.
5312 *
5313 * LOCKING:
cca3974e 5314 * spin_lock_irqsave(host lock)
1da177e4 5315 */
8e0e694a 5316void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5317{
5318 struct ata_port *ap = qc->ap;
5319
dedaf2b0
TH
5320 /* Make sure only one non-NCQ command is outstanding. The
5321 * check is skipped for old EH because it reuses active qc to
5322 * request ATAPI sense.
5323 */
5324 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5325
5326 if (qc->tf.protocol == ATA_PROT_NCQ) {
5327 WARN_ON(ap->sactive & (1 << qc->tag));
5328 ap->sactive |= 1 << qc->tag;
5329 } else {
5330 WARN_ON(ap->sactive);
5331 ap->active_tag = qc->tag;
5332 }
5333
e4a70e76 5334 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5335 ap->qc_active |= 1 << qc->tag;
e4a70e76 5336
1da177e4
LT
5337 if (ata_should_dma_map(qc)) {
5338 if (qc->flags & ATA_QCFLAG_SG) {
5339 if (ata_sg_setup(qc))
8e436af9 5340 goto sg_err;
1da177e4
LT
5341 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5342 if (ata_sg_setup_one(qc))
8e436af9 5343 goto sg_err;
1da177e4
LT
5344 }
5345 } else {
5346 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5347 }
5348
5349 ap->ops->qc_prep(qc);
5350
8e0e694a
TH
5351 qc->err_mask |= ap->ops->qc_issue(qc);
5352 if (unlikely(qc->err_mask))
5353 goto err;
5354 return;
1da177e4 5355
8e436af9
TH
5356sg_err:
5357 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5358 qc->err_mask |= AC_ERR_SYSTEM;
5359err:
5360 ata_qc_complete(qc);
1da177e4
LT
5361}
5362
5363/**
5364 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5365 * @qc: command to issue to device
5366 *
5367 * Using various libata functions and hooks, this function
5368 * starts an ATA command. ATA commands are grouped into
5369 * classes called "protocols", and issuing each type of protocol
5370 * is slightly different.
5371 *
0baab86b
EF
5372 * May be used as the qc_issue() entry in ata_port_operations.
5373 *
1da177e4 5374 * LOCKING:
cca3974e 5375 * spin_lock_irqsave(host lock)
1da177e4
LT
5376 *
5377 * RETURNS:
9a3d9eb0 5378 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5379 */
5380
9a3d9eb0 5381unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5382{
5383 struct ata_port *ap = qc->ap;
5384
e50362ec
AL
5385 /* Use polling PIO if the LLDD doesn't handle
5386 * interrupt-driven PIO and ATAPI CDB interrupts.
5387 */
5388 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5389 switch (qc->tf.protocol) {
5390 case ATA_PROT_PIO:
5391 case ATA_PROT_NODATA:
5392 case ATA_PROT_ATAPI:
5393 case ATA_PROT_ATAPI_NODATA:
5394 qc->tf.flags |= ATA_TFLAG_POLLING;
5395 break;
5396 case ATA_PROT_ATAPI_DMA:
5397 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
5398 /* see ata_dma_blacklisted() */
5399 BUG();
5400 break;
5401 default:
5402 break;
5403 }
5404 }
5405
5406 /* Some controllers show flaky interrupt behavior after
5407 * setting xfer mode. Use polling instead.
5408 */
5409 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
5410 qc->tf.feature == SETFEATURES_XFER) &&
5411 (ap->flags & ATA_FLAG_SETXFER_POLLING))
5412 qc->tf.flags |= ATA_TFLAG_POLLING;
5413
5414 /* select the device */
5415 ata_dev_select(ap, qc->dev->devno, 1, 0);
5416
5417 /* start the command */
5418 switch (qc->tf.protocol) {
5419 case ATA_PROT_NODATA:
5420 if (qc->tf.flags & ATA_TFLAG_POLLING)
5421 ata_qc_set_polling(qc);
5422
5423 ata_tf_to_host(ap, &qc->tf);
5424 ap->hsm_task_state = HSM_ST_LAST;
5425
5426 if (qc->tf.flags & ATA_TFLAG_POLLING)
5427 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5428
5429 break;
5430
5431 case ATA_PROT_DMA:
5432 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5433
5434 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5435 ap->ops->bmdma_setup(qc); /* set up bmdma */
5436 ap->ops->bmdma_start(qc); /* initiate bmdma */
5437 ap->hsm_task_state = HSM_ST_LAST;
5438 break;
5439
5440 case ATA_PROT_PIO:
5441 if (qc->tf.flags & ATA_TFLAG_POLLING)
5442 ata_qc_set_polling(qc);
5443
5444 ata_tf_to_host(ap, &qc->tf);
5445
5446 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5447 /* PIO data out protocol */
5448 ap->hsm_task_state = HSM_ST_FIRST;
5449 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5450
5451 /* always send first data block using
5452 * the ata_pio_task() codepath.
5453 */
5454 } else {
5455 /* PIO data in protocol */
5456 ap->hsm_task_state = HSM_ST;
5457
5458 if (qc->tf.flags & ATA_TFLAG_POLLING)
5459 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5460
5461 /* if polling, ata_pio_task() handles the rest.
5462 * otherwise, interrupt handler takes over from here.
5463 */
5464 }
5465
5466 break;
5467
5468 case ATA_PROT_ATAPI:
5469 case ATA_PROT_ATAPI_NODATA:
5470 if (qc->tf.flags & ATA_TFLAG_POLLING)
5471 ata_qc_set_polling(qc);
5472
5473 ata_tf_to_host(ap, &qc->tf);
5474
5475 ap->hsm_task_state = HSM_ST_FIRST;
5476
5477 /* send cdb by polling if no cdb interrupt */
5478 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5479 (qc->tf.flags & ATA_TFLAG_POLLING))
5480 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5481 break;
5482
5483 case ATA_PROT_ATAPI_DMA:
5484 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
5485
5486 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5487 ap->ops->bmdma_setup(qc); /* set up bmdma */
5488 ap->hsm_task_state = HSM_ST_FIRST;
5489
5490 /* send cdb by polling if no cdb interrupt */
5491 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5492 ata_port_queue_task(ap, ata_pio_task, qc, 0);
5493 break;
5494
5495 default:
5496 WARN_ON(1);
5497 return AC_ERR_SYSTEM;
5498 }
5499
5500 return 0;
5501}
5502
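/*
 * Editor's note: a minimal sketch of how an LLD typically wires this
 * helper into its ata_port_operations, optionally wrapping it for
 * controller-specific setup.  foo_qc_issue() and foo_write_tag() are
 * hypothetical names, not part of libata.
 *
 *	static unsigned int foo_qc_issue(struct ata_queued_cmd *qc)
 *	{
 *		foo_write_tag(qc->ap, qc->tag);	// chip-specific setup
 *		return ata_qc_issue_prot(qc);	// standard protocol dispatch
 *	}
 *
 *	static const struct ata_port_operations foo_ops = {
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= foo_qc_issue,
 *	};
 */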
5503/**
5504 * ata_host_intr - Handle host interrupt for given (port, task)
5505 * @ap: Port on which interrupt arrived (possibly...)
5506 * @qc: Taskfile currently active in engine
5507 *
5508 * Handle host interrupt for given queued command. Currently,
5509 * only DMA interrupts are handled. All other commands are
5510 * handled via polling with interrupts disabled (nIEN bit).
5511 *
5512 * LOCKING:
5513 * spin_lock_irqsave(host lock)
5514 *
5515 * RETURNS:
5516 * One if interrupt was handled, zero if not (shared irq).
5517 */
5518
5519inline unsigned int ata_host_intr(struct ata_port *ap,
5520 struct ata_queued_cmd *qc)
5521{
5522 struct ata_eh_info *ehi = &ap->eh_info;
5523 u8 status, host_stat = 0;
5524
5525 VPRINTK("ata%u: protocol %d task_state %d\n",
5526 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
5527
5528 /* Check whether we are expecting interrupt in this state */
5529 switch (ap->hsm_task_state) {
5530 case HSM_ST_FIRST:
5531 /* Some pre-ATAPI-4 devices assert INTRQ
5532 * at this state when ready to receive CDB.
5533 */
5534
5535 /* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
5536 * The flag was turned on only for atapi devices.
5537 * No need to check is_atapi_taskfile(&qc->tf) again.
5538 */
5539 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5540 goto idle_irq;
5541 break;
5542 case HSM_ST_LAST:
5543 if (qc->tf.protocol == ATA_PROT_DMA ||
5544 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
5545 /* check status of DMA engine */
5546 host_stat = ap->ops->bmdma_status(ap);
5547 VPRINTK("ata%u: host_stat 0x%X\n",
5548 ap->print_id, host_stat);
5549
5550 /* if it's not our irq... */
5551 if (!(host_stat & ATA_DMA_INTR))
5552 goto idle_irq;
5553
5554 /* before we do anything else, clear DMA-Start bit */
5555 ap->ops->bmdma_stop(qc);
5556
5557 if (unlikely(host_stat & ATA_DMA_ERR)) {
5558 /* error when transferring data to/from memory */
5559 qc->err_mask |= AC_ERR_HOST_BUS;
5560 ap->hsm_task_state = HSM_ST_ERR;
5561 }
5562 }
5563 break;
5564 case HSM_ST:
5565 break;
5566 default:
5567 goto idle_irq;
5568 }
5569
5570 /* check altstatus */
5571 status = ata_altstatus(ap);
5572 if (status & ATA_BUSY)
5573 goto idle_irq;
5574
5575 /* check main status, clearing INTRQ */
5576 status = ata_chk_status(ap);
5577 if (unlikely(status & ATA_BUSY))
5578 goto idle_irq;
5579
5580 /* ack bmdma irq events */
5581 ap->ops->irq_clear(ap);
5582
5583 ata_hsm_move(ap, qc, status, 0);
5584
5585 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
5586 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
5587 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
5588
5589 return 1; /* irq handled */
5590
5591idle_irq:
5592 ap->stats.idle_irq++;
5593
5594#ifdef ATA_IRQ_TRAP
5595 if ((ap->stats.idle_irq % 1000) == 0) {
5596 ap->ops->irq_ack(ap, 0); /* debug trap */
5597 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
5598 return 1;
5599 }
5600#endif
5601 return 0; /* irq not handled */
5602}
5603
5604/**
5605 * ata_interrupt - Default ATA host interrupt handler
5606 * @irq: irq line (unused)
5607 * @dev_instance: pointer to our ata_host information structure
5608 *
5609 * Default interrupt handler for PCI IDE devices. Calls
5610 * ata_host_intr() for each port that is not disabled.
5611 *
5612 * LOCKING:
5613 * Obtains host lock during operation.
5614 *
5615 * RETURNS:
5616 * IRQ_NONE or IRQ_HANDLED.
5617 */
5618
5619irqreturn_t ata_interrupt(int irq, void *dev_instance)
5620{
5621 struct ata_host *host = dev_instance;
5622 unsigned int i;
5623 unsigned int handled = 0;
5624 unsigned long flags;
5625
5626 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
5627 spin_lock_irqsave(&host->lock, flags);
5628
5629 for (i = 0; i < host->n_ports; i++) {
5630 struct ata_port *ap;
5631
5632 ap = host->ports[i];
5633 if (ap &&
5634 !(ap->flags & ATA_FLAG_DISABLED)) {
5635 struct ata_queued_cmd *qc;
5636
5637 qc = ata_qc_from_tag(ap, ap->active_tag);
5638 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
5639 (qc->flags & ATA_QCFLAG_ACTIVE))
5640 handled |= ata_host_intr(ap, qc);
5641 }
5642 }
5643
5644 spin_unlock_irqrestore(&host->lock, flags);
5645
5646 return IRQ_RETVAL(handled);
5647}
5648
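/*
 * Editor's note: a hedged usage sketch.  A PCI LLD normally hands this
 * handler to ata_host_activate() (or to devm_request_irq() directly);
 * "host" and "pdev" stand for the driver's own variables and foo_sht is
 * a hypothetical scsi_host_template.
 *
 *	rc = ata_host_activate(host, pdev->irq, ata_interrupt,
 *			       IRQF_SHARED, &foo_sht);
 */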
5649/**
5650 * sata_scr_valid - test whether SCRs are accessible
5651 * @ap: ATA port to test SCR accessibility for
5652 *
5653 * Test whether SCRs are accessible for @ap.
5654 *
5655 * LOCKING:
5656 * None.
5657 *
5658 * RETURNS:
5659 * 1 if SCRs are accessible, 0 otherwise.
5660 */
5661int sata_scr_valid(struct ata_port *ap)
5662{
5663 return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
5664}
5665
5666/**
5667 * sata_scr_read - read SCR register of the specified port
5668 * @ap: ATA port to read SCR for
5669 * @reg: SCR to read
5670 * @val: Place to store read value
5671 *
5672 * Read SCR register @reg of @ap into *@val. This function is
5673 * guaranteed to succeed if the cable type of the port is SATA
5674 * and the port implements ->scr_read.
5675 *
5676 * LOCKING:
5677 * None.
5678 *
5679 * RETURNS:
5680 * 0 on success, negative errno on failure.
5681 */
5682int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
5683{
5684 if (sata_scr_valid(ap)) {
5685 *val = ap->ops->scr_read(ap, reg);
5686 return 0;
5687 }
5688 return -EOPNOTSUPP;
5689}
5690
5691/**
5692 * sata_scr_write - write SCR register of the specified port
5693 * @ap: ATA port to write SCR for
5694 * @reg: SCR to write
5695 * @val: value to write
5696 *
5697 * Write @val to SCR register @reg of @ap. This function is
5698 * guaranteed to succeed if the cable type of the port is SATA
5699 * and the port implements ->scr_read.
5700 *
5701 * LOCKING:
5702 * None.
5703 *
5704 * RETURNS:
5705 * 0 on success, negative errno on failure.
5706 */
5707int sata_scr_write(struct ata_port *ap, int reg, u32 val)
5708{
5709 if (sata_scr_valid(ap)) {
5710 ap->ops->scr_write(ap, reg, val);
5711 return 0;
5712 }
5713 return -EOPNOTSUPP;
5714}
5715
5716/**
5717 * sata_scr_write_flush - write SCR register of the specified port and flush
5718 * @ap: ATA port to write SCR for
5719 * @reg: SCR to write
5720 * @val: value to write
5721 *
5722 * This function is identical to sata_scr_write() except that this
5723 * function performs flush after writing to the register.
5724 *
5725 * LOCKING:
5726 * None.
5727 *
5728 * RETURNS:
5729 * 0 on success, negative errno on failure.
5730 */
5731int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
5732{
5733 if (sata_scr_valid(ap)) {
5734 ap->ops->scr_write(ap, reg, val);
5735 ap->ops->scr_read(ap, reg);
5736 return 0;
5737 }
5738 return -EOPNOTSUPP;
5739}
5740
5741/**
5742 * ata_port_online - test whether the given port is online
5743 * @ap: ATA port to test
5744 *
5745 * Test whether @ap is online. Note that this function returns 0
5746 * if online status of @ap cannot be obtained, so
5747 * ata_port_online(ap) != !ata_port_offline(ap).
5748 *
5749 * LOCKING:
5750 * None.
5751 *
5752 * RETURNS:
5753 * 1 if the port online status is available and online.
5754 */
5755int ata_port_online(struct ata_port *ap)
5756{
5757 u32 sstatus;
5758
5759 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
5760 return 1;
5761 return 0;
5762}
5763
5764/**
5765 * ata_port_offline - test whether the given port is offline
5766 * @ap: ATA port to test
5767 *
5768 * Test whether @ap is offline. Note that this function returns
5769 * 0 if offline status of @ap cannot be obtained, so
5770 * ata_port_online(ap) != !ata_port_offline(ap).
5771 *
5772 * LOCKING:
5773 * None.
5774 *
5775 * RETURNS:
5776 * 1 if the port offline status is available and offline.
5777 */
5778int ata_port_offline(struct ata_port *ap)
5779{
5780 u32 sstatus;
5781
5782 if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
5783 return 1;
5784 return 0;
5785}
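/*
 * Editor's note: an illustrative sketch of the SCR helpers above, as an
 * EH or LLD path might use them; the surrounding function is assumed.
 *
 *	u32 sstatus;
 *
 *	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
 *		return;			// SCRs not accessible on this port
 *	if ((sstatus & 0xf) == 0x3)	// DET: device present, PHY comms up
 *		printk(KERN_INFO "link up\n");
 *	// ata_port_online(ap) wraps exactly this read-and-test
 */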
5786
5787int ata_flush_cache(struct ata_device *dev)
5788{
5789 unsigned int err_mask;
5790 u8 cmd;
5791
5792 if (!ata_try_flush_cache(dev))
5793 return 0;
5794
5795 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
5796 cmd = ATA_CMD_FLUSH_EXT;
5797 else
5798 cmd = ATA_CMD_FLUSH;
5799
5800 err_mask = ata_do_simple_cmd(dev, cmd);
5801 if (err_mask) {
5802 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
5803 return -EIO;
5804 }
5805
5806 return 0;
5807}
5808
5809#ifdef CONFIG_PM
5810static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
5811 unsigned int action, unsigned int ehi_flags,
5812 int wait)
5813{
5814 unsigned long flags;
5815 int i, rc;
5816
5817 for (i = 0; i < host->n_ports; i++) {
5818 struct ata_port *ap = host->ports[i];
5819
5820 /* Previous resume operation might still be in
5821 * progress. Wait for PM_PENDING to clear.
5822 */
5823 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
5824 ata_port_wait_eh(ap);
5825 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5826 }
5827
5828 /* request PM ops to EH */
5829 spin_lock_irqsave(ap->lock, flags);
5830
5831 ap->pm_mesg = mesg;
5832 if (wait) {
5833 rc = 0;
5834 ap->pm_result = &rc;
5835 }
5836
5837 ap->pflags |= ATA_PFLAG_PM_PENDING;
5838 ap->eh_info.action |= action;
5839 ap->eh_info.flags |= ehi_flags;
5840
5841 ata_port_schedule_eh(ap);
5842
5843 spin_unlock_irqrestore(ap->lock, flags);
5844
5845 /* wait and check result */
5846 if (wait) {
5847 ata_port_wait_eh(ap);
5848 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
5849 if (rc)
5850 return rc;
5851 }
5852 }
5853
5854 return 0;
5855}
5856
5857/**
5858 * ata_host_suspend - suspend host
5859 * @host: host to suspend
5860 * @mesg: PM message
5861 *
5862 * Suspend @host. Actual operation is performed by EH. This
5863 * function requests EH to perform PM operations and waits for EH
5864 * to finish.
5865 *
5866 * LOCKING:
5867 * Kernel thread context (may sleep).
5868 *
5869 * RETURNS:
5870 * 0 on success, -errno on failure.
5871 */
5872int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
5873{
5874 int rc;
5875
5876 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
5877 if (rc == 0)
5878 host->dev->power.power_state = mesg;
5879 return rc;
5880}
5881
5882/**
5883 * ata_host_resume - resume host
5884 * @host: host to resume
5885 *
5886 * Resume @host. Actual operation is performed by EH. This
5887 * function requests EH to perform PM operations and returns.
5888 * Note that all resume operations are performed in parallel.
5889 *
5890 * LOCKING:
5891 * Kernel thread context (may sleep).
5892 */
5893void ata_host_resume(struct ata_host *host)
5894{
5895 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
5896 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
5897 host->dev->power.power_state = PMSG_ON;
5898}
5899#endif
5900
5901/**
5902 * ata_port_start - Set port up for dma.
5903 * @ap: Port to initialize
5904 *
5905 * Called just after data structures for each port are
5906 * initialized. Allocates space for PRD table.
5907 *
5908 * May be used as the port_start() entry in ata_port_operations.
5909 *
5910 * LOCKING:
5911 * Inherited from caller.
5912 */
5913int ata_port_start(struct ata_port *ap)
5914{
5915 struct device *dev = ap->dev;
5916 int rc;
5917
5918 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5919 GFP_KERNEL);
5920 if (!ap->prd)
5921 return -ENOMEM;
5922
5923 rc = ata_pad_alloc(ap, dev);
5924 if (rc)
5925 return rc;
5926
5927 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
5928 (unsigned long long)ap->prd_dma);
5929 return 0;
5930}
5931
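/*
 * Editor's note: a sketch, with assumptions flagged, of an LLD
 * port_start() that chains to ata_port_start() before allocating its
 * own per-port state; struct foo_port_priv is hypothetical.
 *
 *	static int foo_port_start(struct ata_port *ap)
 *	{
 *		struct foo_port_priv *pp;
 *		int rc = ata_port_start(ap);	// PRD table + pad buffer
 *
 *		if (rc)
 *			return rc;
 *
 *		pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */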
5932/**
5933 * ata_dev_init - Initialize an ata_device structure
5934 * @dev: Device structure to initialize
5935 *
5936 * Initialize @dev in preparation for probing.
5937 *
5938 * LOCKING:
5939 * Inherited from caller.
5940 */
5941void ata_dev_init(struct ata_device *dev)
5942{
5943 struct ata_port *ap = dev->ap;
5944 unsigned long flags;
5945
5946 /* SATA spd limit is bound to the first device */
5947 ap->sata_spd_limit = ap->hw_sata_spd_limit;
5948
5949 /* High bits of dev->flags are used to record warm plug
5950 * requests which occur asynchronously. Synchronize using
5951 * host lock.
5952 */
5953 spin_lock_irqsave(ap->lock, flags);
5954 dev->flags &= ~ATA_DFLAG_INIT_MASK;
5955 spin_unlock_irqrestore(ap->lock, flags);
5956
5957 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5958 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
5959 dev->pio_mask = UINT_MAX;
5960 dev->mwdma_mask = UINT_MAX;
5961 dev->udma_mask = UINT_MAX;
5962}
5963
5964/**
5965 * ata_port_alloc - allocate and initialize basic ATA port resources
5966 * @host: ATA host this allocated port belongs to
5967 *
5968 * Allocate and initialize basic ATA port resources.
5969 *
5970 * RETURNS:
5971 * Allocated ATA port on success, NULL on failure.
5972 *
5973 * LOCKING:
5974 * Inherited from calling layer (may sleep).
5975 */
5976struct ata_port *ata_port_alloc(struct ata_host *host)
5977{
5978 struct ata_port *ap;
5979 unsigned int i;
5980
5981 DPRINTK("ENTER\n");
5982
5983 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5984 if (!ap)
5985 return NULL;
5986
5987 ap->pflags |= ATA_PFLAG_INITIALIZING;
5988 ap->lock = &host->lock;
5989 ap->flags = ATA_FLAG_DISABLED;
5990 ap->print_id = -1;
5991 ap->ctl = ATA_DEVCTL_OBS;
5992 ap->host = host;
5993 ap->dev = host->dev;
5994
5995 ap->hw_sata_spd_limit = UINT_MAX;
5996 ap->active_tag = ATA_TAG_POISON;
5997 ap->last_ctl = 0xFF;
5998
5999#if defined(ATA_VERBOSE_DEBUG)
6000 /* turn on all debugging levels */
6001 ap->msg_enable = 0x00FF;
6002#elif defined(ATA_DEBUG)
6003 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
6004#else
6005 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
6006#endif
6007
6008 INIT_DELAYED_WORK(&ap->port_task, NULL);
6009 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6010 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
6011 INIT_LIST_HEAD(&ap->eh_done_q);
6012 init_waitqueue_head(&ap->eh_wait_q);
6013
6014 ap->cbl = ATA_CBL_NONE;
6015
6016 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6017 struct ata_device *dev = &ap->device[i];
6018 dev->ap = ap;
6019 dev->devno = i;
6020 ata_dev_init(dev);
6021 }
6022
6023#ifdef ATA_IRQ_TRAP
6024 ap->stats.unhandled_irq = 1;
6025 ap->stats.idle_irq = 1;
6026#endif
6027 return ap;
6028}
6029
6030static void ata_host_release(struct device *gendev, void *res)
6031{
6032 struct ata_host *host = dev_get_drvdata(gendev);
6033 int i;
6034
6035 for (i = 0; i < host->n_ports; i++) {
6036 struct ata_port *ap = host->ports[i];
6037
6038 if (!ap)
6039 continue;
6040
6041 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
6042 ap->ops->port_stop(ap);
6043 }
6044
6045 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
6046 host->ops->host_stop(host);
6047
6048 for (i = 0; i < host->n_ports; i++) {
6049 struct ata_port *ap = host->ports[i];
6050
6051 if (!ap)
6052 continue;
6053
6054 if (ap->scsi_host)
6055 scsi_host_put(ap->scsi_host);
6056
6057 kfree(ap);
6058 host->ports[i] = NULL;
6059 }
6060
6061 dev_set_drvdata(gendev, NULL);
6062}
6063
6064/**
6065 * ata_host_alloc - allocate and init basic ATA host resources
6066 * @dev: generic device this host is associated with
6067 * @max_ports: maximum number of ATA ports associated with this host
6068 *
6069 * Allocate and initialize basic ATA host resources. LLD calls
6070 * this function to allocate a host, initializes it fully and
6071 * attaches it using ata_host_register().
6072 *
6073 * @max_ports ports are allocated and host->n_ports is
6074 * initialized to @max_ports. The caller is allowed to decrease
6075 * host->n_ports before calling ata_host_register(). The unused
6076 * ports will be automatically freed on registration.
6077 *
6078 * RETURNS:
6079 * Allocated ATA host on success, NULL on failure.
6080 *
6081 * LOCKING:
6082 * Inherited from calling layer (may sleep).
6083 */
6084struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6085{
6086 struct ata_host *host;
6087 size_t sz;
6088 int i;
6089
6090 DPRINTK("ENTER\n");
6091
6092 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6093 return NULL;
6094
6095 /* alloc a container for our list of ATA ports (buses) */
6096 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6098 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6099 if (!host)
6100 goto err_out;
6101
6102 devres_add(dev, host);
6103 dev_set_drvdata(dev, host);
6104
6105 spin_lock_init(&host->lock);
6106 host->dev = dev;
6107 host->n_ports = max_ports;
6108
6109 /* allocate ports bound to this host */
6110 for (i = 0; i < max_ports; i++) {
6111 struct ata_port *ap;
6112
6113 ap = ata_port_alloc(host);
6114 if (!ap)
6115 goto err_out;
6116
6117 ap->port_no = i;
6118 host->ports[i] = ap;
6119 }
6120
6121 devres_remove_group(dev, NULL);
6122 return host;
6123
6124 err_out:
6125 devres_release_group(dev, NULL);
6126 return NULL;
6127}
6128
6129/**
6130 * ata_host_alloc_pinfo - alloc host and init with port_info array
6131 * @dev: generic device this host is associated with
6132 * @ppi: array of ATA port_info to initialize host with
6133 * @n_ports: number of ATA ports attached to this host
6134 *
6135 * Allocate ATA host and initialize with info from @ppi. If NULL
6136 * terminated, @ppi may contain fewer entries than @n_ports. The
6137 * last entry will be used for the remaining ports.
6138 *
6139 * RETURNS:
6140 * Allocated ATA host on success, NULL on failure.
6141 *
6142 * LOCKING:
6143 * Inherited from calling layer (may sleep).
6144 */
6145struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6146 const struct ata_port_info * const * ppi,
6147 int n_ports)
6148{
6149 const struct ata_port_info *pi;
6150 struct ata_host *host;
6151 int i, j;
6152
6153 host = ata_host_alloc(dev, n_ports);
6154 if (!host)
6155 return NULL;
6156
6157 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6158 struct ata_port *ap = host->ports[i];
6159
6160 if (ppi[j])
6161 pi = ppi[j++];
6162
6163 ap->pio_mask = pi->pio_mask;
6164 ap->mwdma_mask = pi->mwdma_mask;
6165 ap->udma_mask = pi->udma_mask;
6166 ap->flags |= pi->flags;
6167 ap->ops = pi->port_ops;
6168
6169 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6170 host->ops = pi->port_ops;
6171 if (!host->private_data && pi->private_data)
6172 host->private_data = pi->private_data;
6173 }
6174
6175 return host;
6176}
6177
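/*
 * Editor's note: a minimal sketch of the pattern this helper enables;
 * foo_port_info, foo_ops and the transfer-mode masks are illustrative
 * assumptions, not taken from a real driver.
 *
 *	static const struct ata_port_info foo_port_info = {
 *		.flags		= ATA_FLAG_SATA,
 *		.pio_mask	= 0x1f,		// PIO0-4
 *		.udma_mask	= 0x7f,		// UDMA0-6
 *		.port_ops	= &foo_ops,
 *	};
 *
 *	const struct ata_port_info *ppi[] = { &foo_port_info, NULL };
 *	struct ata_host *host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *
 *	if (!host)
 *		return -ENOMEM;
 *	// the single entry is reused for both ports, per the
 *	// NULL-termination rule described above
 */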
6178/**
6179 * ata_host_start - start and freeze ports of an ATA host
6180 * @host: ATA host to start ports for
6181 *
6182 * Start and then freeze ports of @host. Started status is
6183 * recorded in host->flags, so this function can be called
6184 * multiple times. Ports are guaranteed to get started only
6185 * once. If host->ops isn't initialized yet, it's set to the
6186 * first non-dummy port ops.
6187 *
6188 * LOCKING:
6189 * Inherited from calling layer (may sleep).
6190 *
6191 * RETURNS:
6192 * 0 if all ports are started successfully, -errno otherwise.
6193 */
6194int ata_host_start(struct ata_host *host)
6195{
6196 int i, rc;
6197
6198 if (host->flags & ATA_HOST_STARTED)
6199 return 0;
6200
6201 for (i = 0; i < host->n_ports; i++) {
6202 struct ata_port *ap = host->ports[i];
6203
6204 if (!host->ops && !ata_port_is_dummy(ap))
6205 host->ops = ap->ops;
6206
6207 if (ap->ops->port_start) {
6208 rc = ap->ops->port_start(ap);
6209 if (rc) {
6210 ata_port_printk(ap, KERN_ERR, "failed to "
6211 "start port (errno=%d)\n", rc);
6212 goto err_out;
6213 }
6214 }
6215
6216 ata_eh_freeze_port(ap);
6217 }
6218
6219 host->flags |= ATA_HOST_STARTED;
6220 return 0;
6221
6222 err_out:
6223 while (--i >= 0) {
6224 struct ata_port *ap = host->ports[i];
6225
6226 if (ap->ops->port_stop)
6227 ap->ops->port_stop(ap);
6228 }
6229 return rc;
6230}
6231
6232/**
6233 * ata_host_init - Initialize a host struct
6234 * @host: host to initialize
6235 * @dev: device host is attached to
6236 * @flags: host flags
6237 * @ops: port_ops
6238 *
6239 * LOCKING:
6240 * PCI/etc. bus probe sem.
6241 *
6242 */
6243/* KILLME - the only user left is ipr */
6244void ata_host_init(struct ata_host *host, struct device *dev,
6245 unsigned long flags, const struct ata_port_operations *ops)
6246{
6247 spin_lock_init(&host->lock);
6248 host->dev = dev;
6249 host->flags = flags;
6250 host->ops = ops;
6251}
6252
6253/**
6254 * ata_host_register - register initialized ATA host
6255 * @host: ATA host to register
6256 * @sht: template for SCSI host
6257 *
6258 * Register initialized ATA host. @host is allocated using
6259 * ata_host_alloc() and fully initialized by LLD. This function
6260 * starts ports, registers @host with ATA and SCSI layers and
6261 * probes registered devices.
6262 *
6263 * LOCKING:
6264 * Inherited from calling layer (may sleep).
6265 *
6266 * RETURNS:
6267 * 0 on success, -errno otherwise.
6268 */
6269int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6270{
6271 int i, rc;
6272
6273 /* host must have been started */
6274 if (!(host->flags & ATA_HOST_STARTED)) {
6275 dev_printk(KERN_ERR, host->dev,
6276 "BUG: trying to register unstarted host\n");
6277 WARN_ON(1);
6278 return -EINVAL;
6279 }
6280
6281 /* Blow away unused ports. This happens when LLD can't
6282 * determine the exact number of ports to allocate at
6283 * allocation time.
6284 */
6285 for (i = host->n_ports; host->ports[i]; i++)
6286 kfree(host->ports[i]);
6287
6288 /* give ports names and add SCSI hosts */
6289 for (i = 0; i < host->n_ports; i++)
6290 host->ports[i]->print_id = ata_print_id++;
6291
6292 rc = ata_scsi_add_hosts(host, sht);
6293 if (rc)
6294 return rc;
6295
6296 /* set cable, sata_spd_limit and report */
6297 for (i = 0; i < host->n_ports; i++) {
6298 struct ata_port *ap = host->ports[i];
6299 int irq_line;
6300 u32 scontrol;
6301 unsigned long xfer_mask;
6302
6303 /* set SATA cable type if still unset */
6304 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6305 ap->cbl = ATA_CBL_SATA;
6306
6307 /* init sata_spd_limit to the current value */
6308 if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
6309 int spd = (scontrol >> 4) & 0xf;
6310 ap->hw_sata_spd_limit &= (1 << spd) - 1;
6311 }
6312 ap->sata_spd_limit = ap->hw_sata_spd_limit;
6313
6314 /* report the secondary IRQ for second channel legacy */
6315 irq_line = host->irq;
6316 if (i == 1 && host->irq2)
6317 irq_line = host->irq2;
6318
6319 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6320 ap->udma_mask);
6321
6322 /* print per-port info to dmesg */
6323 if (!ata_port_is_dummy(ap))
6324 ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
6325 "ctl 0x%p bmdma 0x%p irq %d\n",
6326 ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
6327 ata_mode_string(xfer_mask),
6328 ap->ioaddr.cmd_addr,
6329 ap->ioaddr.ctl_addr,
6330 ap->ioaddr.bmdma_addr,
6331 irq_line);
6332 else
6333 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6334 }
6335
6336 /* perform each probe synchronously */
6337 DPRINTK("probe begin\n");
6338 for (i = 0; i < host->n_ports; i++) {
6339 struct ata_port *ap = host->ports[i];
6340 int rc;
6341
6342 /* probe */
6343 if (ap->ops->error_handler) {
6344 struct ata_eh_info *ehi = &ap->eh_info;
6345 unsigned long flags;
6346
6347 ata_port_probe(ap);
6348
6349 /* kick EH for boot probing */
6350 spin_lock_irqsave(ap->lock, flags);
6351
6352 ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
6353 ehi->action |= ATA_EH_SOFTRESET;
6354 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6355
6356 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
6357 ap->pflags |= ATA_PFLAG_LOADING;
6358 ata_port_schedule_eh(ap);
6359
6360 spin_unlock_irqrestore(ap->lock, flags);
6361
6362 /* wait for EH to finish */
6363 ata_port_wait_eh(ap);
6364 } else {
6365 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6366 rc = ata_bus_probe(ap);
6367 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6368
6369 if (rc) {
6370 /* FIXME: do something useful here?
6371 * Current libata behavior will
6372 * tear down everything when
6373 * the module is removed
6374 * or the h/w is unplugged.
6375 */
6376 }
6377 }
6378 }
6379
6380 /* probes are done, now scan each port's disk(s) */
6381 DPRINTK("host probe begin\n");
6382 for (i = 0; i < host->n_ports; i++) {
6383 struct ata_port *ap = host->ports[i];
6384
6385 ata_scsi_scan_host(ap);
6386 }
6387
6388 return 0;
6389}
6390
6391/**
6392 * ata_host_activate - start host, request IRQ and register it
6393 * @host: target ATA host
6394 * @irq: IRQ to request
6395 * @irq_handler: irq_handler used when requesting IRQ
6396 * @irq_flags: irq_flags used when requesting IRQ
6397 * @sht: scsi_host_template to use when registering the host
6398 *
6399 * After allocating an ATA host and initializing it, most libata
6400 * LLDs perform three steps to activate the host - start host,
6401 * request IRQ and register it. This helper takes necessary
6402 * arguments and performs the three steps in one go.
6403 *
6404 * LOCKING:
6405 * Inherited from calling layer (may sleep).
6406 *
6407 * RETURNS:
6408 * 0 on success, -errno otherwise.
6409 */
6410int ata_host_activate(struct ata_host *host, int irq,
6411 irq_handler_t irq_handler, unsigned long irq_flags,
6412 struct scsi_host_template *sht)
6413{
6414 int rc;
6415
6416 rc = ata_host_start(host);
6417 if (rc)
6418 return rc;
6419
6420 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6421 dev_driver_string(host->dev), host);
6422 if (rc)
6423 return rc;
6424
6425 rc = ata_host_register(host, sht);
6426 /* if failed, just free the IRQ and leave ports alone */
6427 if (rc)
6428 devm_free_irq(host->dev, irq, host);
6429
6430 return rc;
6431}
6432
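/*
 * Editor's note: the three steps ata_host_activate() collapses, spelled
 * out as a hedged sketch for a hypothetical PCI driver probe (DRV_NAME
 * and foo_sht are placeholders).
 *
 *	rc = ata_host_start(host);		// start and freeze ports
 *	if (rc)
 *		return rc;
 *	rc = devm_request_irq(host->dev, pdev->irq, ata_interrupt,
 *			      IRQF_SHARED, DRV_NAME, host);
 *	if (rc)
 *		return rc;
 *	return ata_host_register(host, &foo_sht);
 */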
6433/**
6434 * ata_port_detach - Detach ATA port in preparation for device removal
6435 * @ap: ATA port to be detached
6436 *
6437 * Detach all ATA devices and the associated SCSI devices of @ap;
6438 * then, remove the associated SCSI host. @ap is guaranteed to
6439 * be quiescent on return from this function.
6440 *
6441 * LOCKING:
6442 * Kernel thread context (may sleep).
6443 */
6444void ata_port_detach(struct ata_port *ap)
6445{
6446 unsigned long flags;
6447 int i;
6448
6449 if (!ap->ops->error_handler)
6450 goto skip_eh;
6451
6452 /* tell EH we're leaving & flush EH */
6453 spin_lock_irqsave(ap->lock, flags);
6454 ap->pflags |= ATA_PFLAG_UNLOADING;
6455 spin_unlock_irqrestore(ap->lock, flags);
6456
6457 ata_port_wait_eh(ap);
6458
6459 /* EH is now guaranteed to see UNLOADING, so no new device
6460 * will be attached. Disable all existing devices.
6461 */
6462 spin_lock_irqsave(ap->lock, flags);
6463
6464 for (i = 0; i < ATA_MAX_DEVICES; i++)
6465 ata_dev_disable(&ap->device[i]);
6466
6467 spin_unlock_irqrestore(ap->lock, flags);
6468
6469 /* Final freeze & EH. All in-flight commands are aborted. EH
6470 * will be skipped and retries will be terminated with bad
6471 * target.
6472 */
6473 spin_lock_irqsave(ap->lock, flags);
6474 ata_port_freeze(ap); /* won't be thawed */
6475 spin_unlock_irqrestore(ap->lock, flags);
6476
6477 ata_port_wait_eh(ap);
6478
6479 /* Flush hotplug task. The sequence is similar to
6480 * ata_port_flush_task().
6481 */
6482 cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
6483 cancel_delayed_work(&ap->hotplug_task);
6484 cancel_work_sync(&ap->hotplug_task.work);
6485
6486 skip_eh:
6487 /* remove the associated SCSI host */
6488 scsi_remove_host(ap->scsi_host);
6489}
6490
6491/**
6492 * ata_host_detach - Detach all ports of an ATA host
6493 * @host: Host to detach
6494 *
6495 * Detach all ports of @host.
6496 *
6497 * LOCKING:
6498 * Kernel thread context (may sleep).
6499 */
6500void ata_host_detach(struct ata_host *host)
6501{
6502 int i;
6503
6504 for (i = 0; i < host->n_ports; i++)
6505 ata_port_detach(host->ports[i]);
6506}
6507
6508/**
6509 * ata_std_ports - initialize ioaddr with standard port offsets.
6510 * @ioaddr: IO address structure to be initialized
6511 *
6512 * Utility function which initializes data_addr, error_addr,
6513 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6514 * device_addr, status_addr, and command_addr to standard offsets
6515 * relative to cmd_addr.
6516 *
6517 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
6518 */
6519
6520void ata_std_ports(struct ata_ioports *ioaddr)
6521{
6522 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6523 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6524 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6525 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6526 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6527 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6528 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6529 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6530 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6531 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6532}
6533
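/*
 * Editor's note: a hedged sketch of typical use when an LLD maps a
 * legacy-style register block; the iomap indices follow the common
 * BAR0/BAR1 convention but are assumptions here.
 *
 *	void __iomem * const *iomap = host->iomap;
 *
 *	ap->ioaddr.cmd_addr = iomap[0];
 *	ap->ioaddr.altstatus_addr =
 *	ap->ioaddr.ctl_addr = iomap[1] + 2;	// ctl lives at base + 2
 *	ata_std_ports(&ap->ioaddr);
 */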
6534
6535#ifdef CONFIG_PCI
6536
6537/**
6538 * ata_pci_remove_one - PCI layer callback for device removal
6539 * @pdev: PCI device that was removed
6540 *
6541 * PCI layer indicates to libata via this hook that hot-unplug or
6542 * module unload event has occurred. Detach all ports. Resource
6543 * release is handled via devres.
6544 *
6545 * LOCKING:
6546 * Inherited from PCI layer (may sleep).
6547 */
6548void ata_pci_remove_one(struct pci_dev *pdev)
6549{
6550 struct device *dev = pci_dev_to_dev(pdev);
6551 struct ata_host *host = dev_get_drvdata(dev);
6552
6553 ata_host_detach(host);
6554}
6555
6556/* move to PCI subsystem */
6557int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
6558{
6559 unsigned long tmp = 0;
6560
6561 switch (bits->width) {
6562 case 1: {
6563 u8 tmp8 = 0;
6564 pci_read_config_byte(pdev, bits->reg, &tmp8);
6565 tmp = tmp8;
6566 break;
6567 }
6568 case 2: {
6569 u16 tmp16 = 0;
6570 pci_read_config_word(pdev, bits->reg, &tmp16);
6571 tmp = tmp16;
6572 break;
6573 }
6574 case 4: {
6575 u32 tmp32 = 0;
6576 pci_read_config_dword(pdev, bits->reg, &tmp32);
6577 tmp = tmp32;
6578 break;
6579 }
6580
6581 default:
6582 return -EINVAL;
6583 }
6584
6585 tmp &= bits->mask;
6586
6587 return (tmp == bits->val) ? 1 : 0;
6588}
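/*
 * Editor's note: a sketch of the usual caller pattern; the config-space
 * offsets and masks below are placeholders, not a real controller's.
 *
 *	static const struct pci_bits foo_enable_bits[] = {
 *		{ 0x41, 1, 0x80, 0x80 },	// port 0: reg, width, mask, val
 *		{ 0x43, 1, 0x80, 0x80 },	// port 1
 *	};
 *
 *	if (!pci_test_config_bits(pdev, &foo_enable_bits[ap->port_no]))
 *		return -ENOENT;		// channel disabled in config space
 */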
6589
6590#ifdef CONFIG_PM
6591void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
6592{
6593 pci_save_state(pdev);
6594 pci_disable_device(pdev);
6595
6596 if (mesg.event == PM_EVENT_SUSPEND)
6597 pci_set_power_state(pdev, PCI_D3hot);
6598}
6599
6600int ata_pci_device_do_resume(struct pci_dev *pdev)
6601{
6602 int rc;
6603
6604 pci_set_power_state(pdev, PCI_D0);
6605 pci_restore_state(pdev);
6606
6607 rc = pcim_enable_device(pdev);
6608 if (rc) {
6609 dev_printk(KERN_ERR, &pdev->dev,
6610 "failed to enable device after resume (%d)\n", rc);
6611 return rc;
6612 }
6613
6614 pci_set_master(pdev);
6615 return 0;
6616}
6617
6618int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
6619{
6620 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6621 int rc = 0;
6622
6623 rc = ata_host_suspend(host, mesg);
6624 if (rc)
6625 return rc;
6626
6627 ata_pci_device_do_suspend(pdev, mesg);
6628
6629 return 0;
6630}
6631
6632int ata_pci_device_resume(struct pci_dev *pdev)
6633{
6634 struct ata_host *host = dev_get_drvdata(&pdev->dev);
6635 int rc;
6636
6637 rc = ata_pci_device_do_resume(pdev);
6638 if (rc == 0)
6639 ata_host_resume(host);
6640 return rc;
6641}
6642#endif /* CONFIG_PM */
6643
6644#endif /* CONFIG_PCI */
6645
6646
6647static int __init ata_init(void)
6648{
6649 ata_probe_timeout *= HZ;
6650 ata_wq = create_workqueue("ata");
6651 if (!ata_wq)
6652 return -ENOMEM;
6653
6654 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6655 if (!ata_aux_wq) {
6656 destroy_workqueue(ata_wq);
6657 return -ENOMEM;
6658 }
6659
6660 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6661 return 0;
6662}
6663
6664static void __exit ata_exit(void)
6665{
6666 destroy_workqueue(ata_wq);
6667 destroy_workqueue(ata_aux_wq);
6668}
6669
6670subsys_initcall(ata_init);
6671module_exit(ata_exit);
6672
6673static unsigned long ratelimit_time;
6674static DEFINE_SPINLOCK(ata_ratelimit_lock);
6675
6676int ata_ratelimit(void)
6677{
6678 int rc;
6679 unsigned long flags;
6680
6681 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6682
6683 if (time_after(jiffies, ratelimit_time)) {
6684 rc = 1;
6685 ratelimit_time = jiffies + (HZ/5);
6686 } else
6687 rc = 0;
6688
6689 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6690
6691 return rc;
6692}
6693
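/*
 * Editor's note: illustrative use in an interrupt or EH path, where an
 * unthrottled message could flood the log (the message text is made up).
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING, "spurious interrupt\n");
 */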
6694/**
6695 * ata_wait_register - wait until register value changes
6696 * @reg: IO-mapped register
6697 * @mask: Mask to apply to read register value
6698 * @val: Wait condition
6699 * @interval_msec: polling interval in milliseconds
6700 * @timeout_msec: timeout in milliseconds
6701 *
6702 * Waiting for some bits of register to change is a common
6703 * operation for ATA controllers. This function reads 32bit LE
6704 * IO-mapped register @reg and tests for the following condition.
6705 *
6706 * (*@reg & mask) != val
6707 *
6708 * If the condition is met, it returns; otherwise, the process is
6709 * repeated after @interval_msec until timeout.
6710 *
6711 * LOCKING:
6712 * Kernel thread context (may sleep)
6713 *
6714 * RETURNS:
6715 * The final register value.
6716 */
6717u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
6718 unsigned long interval_msec,
6719 unsigned long timeout_msec)
6720{
6721 unsigned long timeout;
6722 u32 tmp;
6723
6724 tmp = ioread32(reg);
6725
6726 /* Calculate timeout _after_ the first read to make sure
6727 * preceding writes reach the controller before starting to
6728 * eat away the timeout.
6729 */
6730 timeout = jiffies + (timeout_msec * HZ) / 1000;
6731
6732 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
6733 msleep(interval_msec);
6734 tmp = ioread32(reg);
6735 }
6736
6737 return tmp;
6738}
6739
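/*
 * Editor's note: a usage sketch under stated assumptions; mmio, FOO_STAT
 * and FOO_BUSY are hypothetical.  This waits up to one second for the
 * busy bit to clear, polling every 10ms.
 *
 *	u32 status = ata_wait_register(mmio + FOO_STAT, FOO_BUSY, FOO_BUSY,
 *				       10, 1000);
 *	if (status & FOO_BUSY)
 *		return -EBUSY;		// timed out, bit still set
 */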
6740/*
6741 * Dummy port_ops
6742 */
6743static void ata_dummy_noret(struct ata_port *ap) { }
6744static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
6745static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
6746
6747static u8 ata_dummy_check_status(struct ata_port *ap)
6748{
6749 return ATA_DRDY;
6750}
6751
6752static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
6753{
6754 return AC_ERR_SYSTEM;
6755}
6756
6757const struct ata_port_operations ata_dummy_port_ops = {
6758 .port_disable = ata_port_disable,
6759 .check_status = ata_dummy_check_status,
6760 .check_altstatus = ata_dummy_check_status,
6761 .dev_select = ata_noop_dev_select,
6762 .qc_prep = ata_noop_qc_prep,
6763 .qc_issue = ata_dummy_qc_issue,
6764 .freeze = ata_dummy_noret,
6765 .thaw = ata_dummy_noret,
6766 .error_handler = ata_dummy_noret,
6767 .post_internal_cmd = ata_dummy_qc_noret,
6768 .irq_clear = ata_dummy_noret,
6769 .port_start = ata_dummy_ret0,
6770 .port_stop = ata_dummy_noret,
6771};
6772
6773const struct ata_port_info ata_dummy_port_info = {
6774 .port_ops = &ata_dummy_port_ops,
6775};
6776
6777/*
6778 * libata is essentially a library of internal helper functions for
6779 * low-level ATA host controller drivers. As such, the API/ABI is
6780 * likely to change as new drivers are added and updated.
6781 * Do not depend on ABI/API stability.
6782 */
6783
6784EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6785EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6786EXPORT_SYMBOL_GPL(sata_deb_timing_long);
6787EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
6788EXPORT_SYMBOL_GPL(ata_dummy_port_info);
6789EXPORT_SYMBOL_GPL(ata_std_bios_param);
6790EXPORT_SYMBOL_GPL(ata_std_ports);
6791EXPORT_SYMBOL_GPL(ata_host_init);
6792EXPORT_SYMBOL_GPL(ata_host_alloc);
6793EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
6794EXPORT_SYMBOL_GPL(ata_host_start);
6795EXPORT_SYMBOL_GPL(ata_host_register);
6796EXPORT_SYMBOL_GPL(ata_host_activate);
6797EXPORT_SYMBOL_GPL(ata_host_detach);
6798EXPORT_SYMBOL_GPL(ata_sg_init);
6799EXPORT_SYMBOL_GPL(ata_sg_init_one);
6800EXPORT_SYMBOL_GPL(ata_hsm_move);
6801EXPORT_SYMBOL_GPL(ata_qc_complete);
6802EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
6803EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
6804EXPORT_SYMBOL_GPL(ata_tf_load);
6805EXPORT_SYMBOL_GPL(ata_tf_read);
6806EXPORT_SYMBOL_GPL(ata_noop_dev_select);
6807EXPORT_SYMBOL_GPL(ata_std_dev_select);
6808EXPORT_SYMBOL_GPL(sata_print_link_status);
6809EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6810EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6811EXPORT_SYMBOL_GPL(ata_check_status);
6812EXPORT_SYMBOL_GPL(ata_altstatus);
6813EXPORT_SYMBOL_GPL(ata_exec_command);
6814EXPORT_SYMBOL_GPL(ata_port_start);
6815EXPORT_SYMBOL_GPL(ata_interrupt);
6816EXPORT_SYMBOL_GPL(ata_do_set_mode);
6817EXPORT_SYMBOL_GPL(ata_data_xfer);
6818EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
6819EXPORT_SYMBOL_GPL(ata_qc_prep);
6820EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
6821EXPORT_SYMBOL_GPL(ata_bmdma_setup);
6822EXPORT_SYMBOL_GPL(ata_bmdma_start);
6823EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
6824EXPORT_SYMBOL_GPL(ata_bmdma_status);
6825EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6826EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
6827EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
6828EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
6829EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
6830EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
6831EXPORT_SYMBOL_GPL(ata_port_probe);
6832EXPORT_SYMBOL_GPL(ata_dev_disable);
6833EXPORT_SYMBOL_GPL(sata_set_spd);
6834EXPORT_SYMBOL_GPL(sata_phy_debounce);
6835EXPORT_SYMBOL_GPL(sata_phy_resume);
6836EXPORT_SYMBOL_GPL(sata_phy_reset);
6837EXPORT_SYMBOL_GPL(__sata_phy_reset);
6838EXPORT_SYMBOL_GPL(ata_bus_reset);
6839EXPORT_SYMBOL_GPL(ata_std_prereset);
6840EXPORT_SYMBOL_GPL(ata_std_softreset);
6841EXPORT_SYMBOL_GPL(sata_port_hardreset);
6842EXPORT_SYMBOL_GPL(sata_std_hardreset);
6843EXPORT_SYMBOL_GPL(ata_std_postreset);
6844EXPORT_SYMBOL_GPL(ata_dev_classify);
6845EXPORT_SYMBOL_GPL(ata_dev_pair);
6846EXPORT_SYMBOL_GPL(ata_port_disable);
6847EXPORT_SYMBOL_GPL(ata_ratelimit);
6848EXPORT_SYMBOL_GPL(ata_wait_register);
6849EXPORT_SYMBOL_GPL(ata_busy_sleep);
6850EXPORT_SYMBOL_GPL(ata_wait_ready);
6851EXPORT_SYMBOL_GPL(ata_port_queue_task);
6852EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6853EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
6854EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
6855EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
6856EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
6857EXPORT_SYMBOL_GPL(ata_host_intr);
6858EXPORT_SYMBOL_GPL(sata_scr_valid);
6859EXPORT_SYMBOL_GPL(sata_scr_read);
6860EXPORT_SYMBOL_GPL(sata_scr_write);
6861EXPORT_SYMBOL_GPL(sata_scr_write_flush);
6862EXPORT_SYMBOL_GPL(ata_port_online);
6863EXPORT_SYMBOL_GPL(ata_port_offline);
6864#ifdef CONFIG_PM
6865EXPORT_SYMBOL_GPL(ata_host_suspend);
6866EXPORT_SYMBOL_GPL(ata_host_resume);
6867#endif /* CONFIG_PM */
6868EXPORT_SYMBOL_GPL(ata_id_string);
6869EXPORT_SYMBOL_GPL(ata_id_c_string);
6870EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
6871EXPORT_SYMBOL_GPL(ata_device_blacklisted);
6872EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6873
6874EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6875EXPORT_SYMBOL_GPL(ata_timing_compute);
6876EXPORT_SYMBOL_GPL(ata_timing_merge);
6877
6878#ifdef CONFIG_PCI
6879EXPORT_SYMBOL_GPL(pci_test_config_bits);
6880EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
6881EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
6882EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
6883EXPORT_SYMBOL_GPL(ata_pci_init_one);
6884EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6885#ifdef CONFIG_PM
6886EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6887EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
6888EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6889EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6890#endif /* CONFIG_PM */
6891EXPORT_SYMBOL_GPL(ata_pci_default_filter);
6892EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
6893#endif /* CONFIG_PCI */
6894
6895EXPORT_SYMBOL_GPL(ata_eng_timeout);
6896EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
6897EXPORT_SYMBOL_GPL(ata_port_abort);
6898EXPORT_SYMBOL_GPL(ata_port_freeze);
6899EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6900EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
6901EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6902EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
6903EXPORT_SYMBOL_GPL(ata_do_eh);
6904EXPORT_SYMBOL_GPL(ata_irq_on);
6905EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
6906EXPORT_SYMBOL_GPL(ata_irq_ack);
6907EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
6908EXPORT_SYMBOL_GPL(ata_dev_try_classify);
6909
6910EXPORT_SYMBOL_GPL(ata_cable_40wire);
6911EXPORT_SYMBOL_GPL(ata_cable_80wire);
6912EXPORT_SYMBOL_GPL(ata_cable_unknown);
6913EXPORT_SYMBOL_GPL(ata_cable_sata);