]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
Linux 2.6.25-rc5
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/highmem.h>
50#include <linux/spinlock.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h>
53#include <linux/timer.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
67846b30 58#include <linux/jiffies.h>
378f058c 59#include <linux/scatterlist.h>
2dcb407e 60#include <linux/io.h>
1da177e4 61#include <scsi/scsi.h>
193515d5 62#include <scsi/scsi_cmnd.h>
1da177e4
LT
63#include <scsi/scsi_host.h>
64#include <linux/libata.h>
1da177e4
LT
65#include <asm/semaphore.h>
66#include <asm/byteorder.h>
140b5e59 67#include <linux/cdrom.h>
1da177e4
LT
68
69#include "libata.h"
70
fda0efc5 71
d7bb4cc7 72/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
73const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
74const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
75const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 76
3373efd8
TH
77static unsigned int ata_dev_init_params(struct ata_device *dev,
78 u16 heads, u16 sectors);
79static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
80static unsigned int ata_dev_set_feature(struct ata_device *dev,
81 u8 enable, u8 feature);
3373efd8 82static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 83static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 84
f3187195 85unsigned int ata_print_id = 1;
1da177e4
LT
86static struct workqueue_struct *ata_wq;
87
453b07ac
TH
88struct workqueue_struct *ata_aux_wq;
89
33267325
TH
90struct ata_force_param {
91 const char *name;
92 unsigned int cbl;
93 int spd_limit;
94 unsigned long xfer_mask;
95 unsigned int horkage_on;
96 unsigned int horkage_off;
97};
98
99struct ata_force_ent {
100 int port;
101 int device;
102 struct ata_force_param param;
103};
104
105static struct ata_force_ent *ata_force_tbl;
106static int ata_force_tbl_size;
107
108static char ata_force_param_buf[PAGE_SIZE] __initdata;
109module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0444);
110MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
111
418dc1f5 112int atapi_enabled = 1;
1623c81e
JG
113module_param(atapi_enabled, int, 0444);
114MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
115
c5c61bda 116static int atapi_dmadir = 0;
95de719a
AL
117module_param(atapi_dmadir, int, 0444);
118MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
119
baf4fdfa
ML
120int atapi_passthru16 = 1;
121module_param(atapi_passthru16, int, 0444);
122MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
123
c3c013a2
JG
124int libata_fua = 0;
125module_param_named(fua, libata_fua, int, 0444);
126MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
127
2dcb407e 128static int ata_ignore_hpa;
1e999736
AC
129module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
130MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
131
b3a70601
AC
132static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
133module_param_named(dma, libata_dma_mask, int, 0444);
134MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
135
a8601e5f
AM
136static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
137module_param(ata_probe_timeout, int, 0444);
138MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
139
6ebe9d86 140int libata_noacpi = 0;
d7d0dad6 141module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 142MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 143
ae8d4ee7
AC
144int libata_allow_tpm = 0;
145module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
146MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
147
1da177e4
LT
148MODULE_AUTHOR("Jeff Garzik");
149MODULE_DESCRIPTION("Library module for ATA devices");
150MODULE_LICENSE("GPL");
151MODULE_VERSION(DRV_VERSION);
152
0baab86b 153
33267325
TH
154/**
155 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 156 * @ap: ATA port of interest
33267325
TH
157 *
158 * Force cable type according to libata.force and whine about it.
159 * The last entry which has matching port number is used, so it
160 * can be specified as part of device force parameters. For
161 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
162 * same effect.
163 *
164 * LOCKING:
165 * EH context.
166 */
167void ata_force_cbl(struct ata_port *ap)
168{
169 int i;
170
171 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
172 const struct ata_force_ent *fe = &ata_force_tbl[i];
173
174 if (fe->port != -1 && fe->port != ap->print_id)
175 continue;
176
177 if (fe->param.cbl == ATA_CBL_NONE)
178 continue;
179
180 ap->cbl = fe->param.cbl;
181 ata_port_printk(ap, KERN_NOTICE,
182 "FORCE: cable set to %s\n", fe->param.name);
183 return;
184 }
185}
186
187/**
188 * ata_force_spd_limit - force SATA spd limit according to libata.force
189 * @link: ATA link of interest
190 *
191 * Force SATA spd limit according to libata.force and whine about
192 * it. When only the port part is specified (e.g. 1:), the limit
193 * applies to all links connected to both the host link and all
194 * fan-out ports connected via PMP. If the device part is
195 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
196 * link not the host link. Device number 15 always points to the
197 * host link whether PMP is attached or not.
198 *
199 * LOCKING:
200 * EH context.
201 */
202static void ata_force_spd_limit(struct ata_link *link)
203{
204 int linkno, i;
205
206 if (ata_is_host_link(link))
207 linkno = 15;
208 else
209 linkno = link->pmp;
210
211 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
212 const struct ata_force_ent *fe = &ata_force_tbl[i];
213
214 if (fe->port != -1 && fe->port != link->ap->print_id)
215 continue;
216
217 if (fe->device != -1 && fe->device != linkno)
218 continue;
219
220 if (!fe->param.spd_limit)
221 continue;
222
223 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
224 ata_link_printk(link, KERN_NOTICE,
225 "FORCE: PHY spd limit set to %s\n", fe->param.name);
226 return;
227 }
228}
229
230/**
231 * ata_force_xfermask - force xfermask according to libata.force
232 * @dev: ATA device of interest
233 *
234 * Force xfer_mask according to libata.force and whine about it.
235 * For consistency with link selection, device number 15 selects
236 * the first device connected to the host link.
237 *
238 * LOCKING:
239 * EH context.
240 */
241static void ata_force_xfermask(struct ata_device *dev)
242{
243 int devno = dev->link->pmp + dev->devno;
244 int alt_devno = devno;
245 int i;
246
247 /* allow n.15 for the first device attached to host port */
248 if (ata_is_host_link(dev->link) && devno == 0)
249 alt_devno = 15;
250
251 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
252 const struct ata_force_ent *fe = &ata_force_tbl[i];
253 unsigned long pio_mask, mwdma_mask, udma_mask;
254
255 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
256 continue;
257
258 if (fe->device != -1 && fe->device != devno &&
259 fe->device != alt_devno)
260 continue;
261
262 if (!fe->param.xfer_mask)
263 continue;
264
265 ata_unpack_xfermask(fe->param.xfer_mask,
266 &pio_mask, &mwdma_mask, &udma_mask);
267 if (udma_mask)
268 dev->udma_mask = udma_mask;
269 else if (mwdma_mask) {
270 dev->udma_mask = 0;
271 dev->mwdma_mask = mwdma_mask;
272 } else {
273 dev->udma_mask = 0;
274 dev->mwdma_mask = 0;
275 dev->pio_mask = pio_mask;
276 }
277
278 ata_dev_printk(dev, KERN_NOTICE,
279 "FORCE: xfer_mask set to %s\n", fe->param.name);
280 return;
281 }
282}
283
284/**
285 * ata_force_horkage - force horkage according to libata.force
286 * @dev: ATA device of interest
287 *
288 * Force horkage according to libata.force and whine about it.
289 * For consistency with link selection, device number 15 selects
290 * the first device connected to the host link.
291 *
292 * LOCKING:
293 * EH context.
294 */
295static void ata_force_horkage(struct ata_device *dev)
296{
297 int devno = dev->link->pmp + dev->devno;
298 int alt_devno = devno;
299 int i;
300
301 /* allow n.15 for the first device attached to host port */
302 if (ata_is_host_link(dev->link) && devno == 0)
303 alt_devno = 15;
304
305 for (i = 0; i < ata_force_tbl_size; i++) {
306 const struct ata_force_ent *fe = &ata_force_tbl[i];
307
308 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
309 continue;
310
311 if (fe->device != -1 && fe->device != devno &&
312 fe->device != alt_devno)
313 continue;
314
315 if (!(~dev->horkage & fe->param.horkage_on) &&
316 !(dev->horkage & fe->param.horkage_off))
317 continue;
318
319 dev->horkage |= fe->param.horkage_on;
320 dev->horkage &= ~fe->param.horkage_off;
321
322 ata_dev_printk(dev, KERN_NOTICE,
323 "FORCE: horkage modified (%s)\n", fe->param.name);
324 }
325}
326
1da177e4
LT
327/**
328 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
329 * @tf: Taskfile to convert
1da177e4 330 * @pmp: Port multiplier port
9977126c
TH
331 * @is_cmd: This FIS is for command
332 * @fis: Buffer into which data will output
1da177e4
LT
333 *
334 * Converts a standard ATA taskfile to a Serial ATA
335 * FIS structure (Register - Host to Device).
336 *
337 * LOCKING:
338 * Inherited from caller.
339 */
9977126c 340void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 341{
9977126c
TH
342 fis[0] = 0x27; /* Register - Host to Device FIS */
343 fis[1] = pmp & 0xf; /* Port multiplier number*/
344 if (is_cmd)
345 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
346
1da177e4
LT
347 fis[2] = tf->command;
348 fis[3] = tf->feature;
349
350 fis[4] = tf->lbal;
351 fis[5] = tf->lbam;
352 fis[6] = tf->lbah;
353 fis[7] = tf->device;
354
355 fis[8] = tf->hob_lbal;
356 fis[9] = tf->hob_lbam;
357 fis[10] = tf->hob_lbah;
358 fis[11] = tf->hob_feature;
359
360 fis[12] = tf->nsect;
361 fis[13] = tf->hob_nsect;
362 fis[14] = 0;
363 fis[15] = tf->ctl;
364
365 fis[16] = 0;
366 fis[17] = 0;
367 fis[18] = 0;
368 fis[19] = 0;
369}
370
371/**
372 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
373 * @fis: Buffer from which data will be input
374 * @tf: Taskfile to output
375 *
e12a1be6 376 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
377 *
378 * LOCKING:
379 * Inherited from caller.
380 */
381
057ace5e 382void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
383{
384 tf->command = fis[2]; /* status */
385 tf->feature = fis[3]; /* error */
386
387 tf->lbal = fis[4];
388 tf->lbam = fis[5];
389 tf->lbah = fis[6];
390 tf->device = fis[7];
391
392 tf->hob_lbal = fis[8];
393 tf->hob_lbam = fis[9];
394 tf->hob_lbah = fis[10];
395
396 tf->nsect = fis[12];
397 tf->hob_nsect = fis[13];
398}
399
8cbd6df1
AL
400static const u8 ata_rw_cmds[] = {
401 /* pio multi */
402 ATA_CMD_READ_MULTI,
403 ATA_CMD_WRITE_MULTI,
404 ATA_CMD_READ_MULTI_EXT,
405 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
406 0,
407 0,
408 0,
409 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
410 /* pio */
411 ATA_CMD_PIO_READ,
412 ATA_CMD_PIO_WRITE,
413 ATA_CMD_PIO_READ_EXT,
414 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
415 0,
416 0,
417 0,
418 0,
8cbd6df1
AL
419 /* dma */
420 ATA_CMD_READ,
421 ATA_CMD_WRITE,
422 ATA_CMD_READ_EXT,
9a3dccc4
TH
423 ATA_CMD_WRITE_EXT,
424 0,
425 0,
426 0,
427 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 428};
1da177e4
LT
429
430/**
8cbd6df1 431 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
432 * @tf: command to examine and configure
433 * @dev: device tf belongs to
1da177e4 434 *
2e9edbf8 435 * Examine the device configuration and tf->flags to calculate
8cbd6df1 436 * the proper read/write commands and protocol to use.
1da177e4
LT
437 *
438 * LOCKING:
439 * caller.
440 */
bd056d7e 441static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 442{
9a3dccc4 443 u8 cmd;
1da177e4 444
9a3dccc4 445 int index, fua, lba48, write;
2e9edbf8 446
9a3dccc4 447 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
448 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
449 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 450
8cbd6df1
AL
451 if (dev->flags & ATA_DFLAG_PIO) {
452 tf->protocol = ATA_PROT_PIO;
9a3dccc4 453 index = dev->multi_count ? 0 : 8;
9af5c9c9 454 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
455 /* Unable to use DMA due to host limitation */
456 tf->protocol = ATA_PROT_PIO;
0565c26d 457 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
458 } else {
459 tf->protocol = ATA_PROT_DMA;
9a3dccc4 460 index = 16;
8cbd6df1 461 }
1da177e4 462
9a3dccc4
TH
463 cmd = ata_rw_cmds[index + fua + lba48 + write];
464 if (cmd) {
465 tf->command = cmd;
466 return 0;
467 }
468 return -1;
1da177e4
LT
469}
470
35b649fe
TH
471/**
472 * ata_tf_read_block - Read block address from ATA taskfile
473 * @tf: ATA taskfile of interest
474 * @dev: ATA device @tf belongs to
475 *
476 * LOCKING:
477 * None.
478 *
479 * Read block address from @tf. This function can handle all
480 * three address formats - LBA, LBA48 and CHS. tf->protocol and
481 * flags select the address format to use.
482 *
483 * RETURNS:
484 * Block address read from @tf.
485 */
486u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
487{
488 u64 block = 0;
489
490 if (tf->flags & ATA_TFLAG_LBA) {
491 if (tf->flags & ATA_TFLAG_LBA48) {
492 block |= (u64)tf->hob_lbah << 40;
493 block |= (u64)tf->hob_lbam << 32;
494 block |= tf->hob_lbal << 24;
495 } else
496 block |= (tf->device & 0xf) << 24;
497
498 block |= tf->lbah << 16;
499 block |= tf->lbam << 8;
500 block |= tf->lbal;
501 } else {
502 u32 cyl, head, sect;
503
504 cyl = tf->lbam | (tf->lbah << 8);
505 head = tf->device & 0xf;
506 sect = tf->lbal;
507
508 block = (cyl * dev->heads + head) * dev->sectors + sect;
509 }
510
511 return block;
512}
513
bd056d7e
TH
514/**
515 * ata_build_rw_tf - Build ATA taskfile for given read/write request
516 * @tf: Target ATA taskfile
517 * @dev: ATA device @tf belongs to
518 * @block: Block address
519 * @n_block: Number of blocks
520 * @tf_flags: RW/FUA etc...
521 * @tag: tag
522 *
523 * LOCKING:
524 * None.
525 *
526 * Build ATA taskfile @tf for read/write request described by
527 * @block, @n_block, @tf_flags and @tag on @dev.
528 *
529 * RETURNS:
530 *
531 * 0 on success, -ERANGE if the request is too large for @dev,
532 * -EINVAL if the request is invalid.
533 */
534int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
535 u64 block, u32 n_block, unsigned int tf_flags,
536 unsigned int tag)
537{
538 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
539 tf->flags |= tf_flags;
540
6d1245bf 541 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
542 /* yay, NCQ */
543 if (!lba_48_ok(block, n_block))
544 return -ERANGE;
545
546 tf->protocol = ATA_PROT_NCQ;
547 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
548
549 if (tf->flags & ATA_TFLAG_WRITE)
550 tf->command = ATA_CMD_FPDMA_WRITE;
551 else
552 tf->command = ATA_CMD_FPDMA_READ;
553
554 tf->nsect = tag << 3;
555 tf->hob_feature = (n_block >> 8) & 0xff;
556 tf->feature = n_block & 0xff;
557
558 tf->hob_lbah = (block >> 40) & 0xff;
559 tf->hob_lbam = (block >> 32) & 0xff;
560 tf->hob_lbal = (block >> 24) & 0xff;
561 tf->lbah = (block >> 16) & 0xff;
562 tf->lbam = (block >> 8) & 0xff;
563 tf->lbal = block & 0xff;
564
565 tf->device = 1 << 6;
566 if (tf->flags & ATA_TFLAG_FUA)
567 tf->device |= 1 << 7;
568 } else if (dev->flags & ATA_DFLAG_LBA) {
569 tf->flags |= ATA_TFLAG_LBA;
570
571 if (lba_28_ok(block, n_block)) {
572 /* use LBA28 */
573 tf->device |= (block >> 24) & 0xf;
574 } else if (lba_48_ok(block, n_block)) {
575 if (!(dev->flags & ATA_DFLAG_LBA48))
576 return -ERANGE;
577
578 /* use LBA48 */
579 tf->flags |= ATA_TFLAG_LBA48;
580
581 tf->hob_nsect = (n_block >> 8) & 0xff;
582
583 tf->hob_lbah = (block >> 40) & 0xff;
584 tf->hob_lbam = (block >> 32) & 0xff;
585 tf->hob_lbal = (block >> 24) & 0xff;
586 } else
587 /* request too large even for LBA48 */
588 return -ERANGE;
589
590 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
591 return -EINVAL;
592
593 tf->nsect = n_block & 0xff;
594
595 tf->lbah = (block >> 16) & 0xff;
596 tf->lbam = (block >> 8) & 0xff;
597 tf->lbal = block & 0xff;
598
599 tf->device |= ATA_LBA;
600 } else {
601 /* CHS */
602 u32 sect, head, cyl, track;
603
604 /* The request -may- be too large for CHS addressing. */
605 if (!lba_28_ok(block, n_block))
606 return -ERANGE;
607
608 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
609 return -EINVAL;
610
611 /* Convert LBA to CHS */
612 track = (u32)block / dev->sectors;
613 cyl = track / dev->heads;
614 head = track % dev->heads;
615 sect = (u32)block % dev->sectors + 1;
616
617 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
618 (u32)block, track, cyl, head, sect);
619
620 /* Check whether the converted CHS can fit.
621 Cylinder: 0-65535
622 Head: 0-15
623 Sector: 1-255*/
624 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
625 return -ERANGE;
626
627 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
628 tf->lbal = sect;
629 tf->lbam = cyl;
630 tf->lbah = cyl >> 8;
631 tf->device |= head;
632 }
633
634 return 0;
635}
636
cb95d562
TH
637/**
638 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
639 * @pio_mask: pio_mask
640 * @mwdma_mask: mwdma_mask
641 * @udma_mask: udma_mask
642 *
643 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
644 * unsigned int xfer_mask.
645 *
646 * LOCKING:
647 * None.
648 *
649 * RETURNS:
650 * Packed xfer_mask.
651 */
7dc951ae
TH
652unsigned long ata_pack_xfermask(unsigned long pio_mask,
653 unsigned long mwdma_mask,
654 unsigned long udma_mask)
cb95d562
TH
655{
656 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
657 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
658 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
659}
660
c0489e4e
TH
661/**
662 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
663 * @xfer_mask: xfer_mask to unpack
664 * @pio_mask: resulting pio_mask
665 * @mwdma_mask: resulting mwdma_mask
666 * @udma_mask: resulting udma_mask
667 *
668 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
669 * Any NULL distination masks will be ignored.
670 */
7dc951ae
TH
671void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
672 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
673{
674 if (pio_mask)
675 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
676 if (mwdma_mask)
677 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
678 if (udma_mask)
679 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
680}
681
cb95d562 682static const struct ata_xfer_ent {
be9a50c8 683 int shift, bits;
cb95d562
TH
684 u8 base;
685} ata_xfer_tbl[] = {
70cd071e
TH
686 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
687 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
688 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
689 { -1, },
690};
691
692/**
693 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
694 * @xfer_mask: xfer_mask of interest
695 *
696 * Return matching XFER_* value for @xfer_mask. Only the highest
697 * bit of @xfer_mask is considered.
698 *
699 * LOCKING:
700 * None.
701 *
702 * RETURNS:
70cd071e 703 * Matching XFER_* value, 0xff if no match found.
cb95d562 704 */
7dc951ae 705u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
706{
707 int highbit = fls(xfer_mask) - 1;
708 const struct ata_xfer_ent *ent;
709
710 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
711 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
712 return ent->base + highbit - ent->shift;
70cd071e 713 return 0xff;
cb95d562
TH
714}
715
716/**
717 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
718 * @xfer_mode: XFER_* of interest
719 *
720 * Return matching xfer_mask for @xfer_mode.
721 *
722 * LOCKING:
723 * None.
724 *
725 * RETURNS:
726 * Matching xfer_mask, 0 if no match found.
727 */
7dc951ae 728unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
729{
730 const struct ata_xfer_ent *ent;
731
732 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
733 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
734 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
735 & ~((1 << ent->shift) - 1);
cb95d562
TH
736 return 0;
737}
738
739/**
740 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
741 * @xfer_mode: XFER_* of interest
742 *
743 * Return matching xfer_shift for @xfer_mode.
744 *
745 * LOCKING:
746 * None.
747 *
748 * RETURNS:
749 * Matching xfer_shift, -1 if no match found.
750 */
7dc951ae 751int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
752{
753 const struct ata_xfer_ent *ent;
754
755 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
756 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
757 return ent->shift;
758 return -1;
759}
760
1da177e4 761/**
1da7b0d0
TH
762 * ata_mode_string - convert xfer_mask to string
763 * @xfer_mask: mask of bits supported; only highest bit counts.
1da177e4
LT
764 *
765 * Determine string which represents the highest speed
1da7b0d0 766 * (highest bit in @modemask).
1da177e4
LT
767 *
768 * LOCKING:
769 * None.
770 *
771 * RETURNS:
772 * Constant C string representing highest speed listed in
1da7b0d0 773 * @mode_mask, or the constant C string "<n/a>".
1da177e4 774 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* one name per xfer_mask bit position, lowest bit first */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;	/* only the top bit counts */

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}
806
4c360c81
TH
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	/* spd is 1-based; 0 and out-of-range values are unknown */
	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
818
3373efd8 819void ata_dev_disable(struct ata_device *dev)
0b8efb0a 820{
09d7f9b0 821 if (ata_dev_enabled(dev)) {
9af5c9c9 822 if (ata_msg_drv(dev->link->ap))
09d7f9b0 823 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
562f0c2d 824 ata_acpi_on_disable(dev);
4ae72a1e
TH
825 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
826 ATA_DNXFER_QUIET);
0b8efb0a
TH
827 dev->class++;
828 }
829}
830
ca77329f
KCA
831static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
832{
833 struct ata_link *link = dev->link;
834 struct ata_port *ap = link->ap;
835 u32 scontrol;
836 unsigned int err_mask;
837 int rc;
838
839 /*
840 * disallow DIPM for drivers which haven't set
841 * ATA_FLAG_IPM. This is because when DIPM is enabled,
842 * phy ready will be set in the interrupt status on
843 * state changes, which will cause some drivers to
844 * think there are errors - additionally drivers will
845 * need to disable hot plug.
846 */
847 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
848 ap->pm_policy = NOT_AVAILABLE;
849 return -EINVAL;
850 }
851
852 /*
853 * For DIPM, we will only enable it for the
854 * min_power setting.
855 *
856 * Why? Because Disks are too stupid to know that
857 * If the host rejects a request to go to SLUMBER
858 * they should retry at PARTIAL, and instead it
859 * just would give up. So, for medium_power to
860 * work at all, we need to only allow HIPM.
861 */
862 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
863 if (rc)
864 return rc;
865
866 switch (policy) {
867 case MIN_POWER:
868 /* no restrictions on IPM transitions */
869 scontrol &= ~(0x3 << 8);
870 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
871 if (rc)
872 return rc;
873
874 /* enable DIPM */
875 if (dev->flags & ATA_DFLAG_DIPM)
876 err_mask = ata_dev_set_feature(dev,
877 SETFEATURES_SATA_ENABLE, SATA_DIPM);
878 break;
879 case MEDIUM_POWER:
880 /* allow IPM to PARTIAL */
881 scontrol &= ~(0x1 << 8);
882 scontrol |= (0x2 << 8);
883 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
884 if (rc)
885 return rc;
886
f5456b63
KCA
887 /*
888 * we don't have to disable DIPM since IPM flags
889 * disallow transitions to SLUMBER, which effectively
890 * disable DIPM if it does not support PARTIAL
891 */
ca77329f
KCA
892 break;
893 case NOT_AVAILABLE:
894 case MAX_PERFORMANCE:
895 /* disable all IPM transitions */
896 scontrol |= (0x3 << 8);
897 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
898 if (rc)
899 return rc;
900
f5456b63
KCA
901 /*
902 * we don't have to disable DIPM since IPM flags
903 * disallow all transitions which effectively
904 * disable DIPM anyway.
905 */
ca77329f
KCA
906 break;
907 }
908
909 /* FIXME: handle SET FEATURES failure */
910 (void) err_mask;
911
912 return 0;
913}
914
915/**
916 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
917 * @dev: device to enable power management
918 * @policy: the link power management policy
ca77329f
KCA
919 *
920 * Enable SATA Interface power management. This will enable
921 * Device Interface Power Management (DIPM) for min_power
922 * policy, and then call driver specific callbacks for
923 * enabling Host Initiated Power management.
924 *
925 * Locking: Caller.
926 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
927 */
928void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
929{
930 int rc = 0;
931 struct ata_port *ap = dev->link->ap;
932
933 /* set HIPM first, then DIPM */
934 if (ap->ops->enable_pm)
935 rc = ap->ops->enable_pm(ap, policy);
936 if (rc)
937 goto enable_pm_out;
938 rc = ata_dev_set_dipm(dev, policy);
939
940enable_pm_out:
941 if (rc)
942 ap->pm_policy = MAX_PERFORMANCE;
943 else
944 ap->pm_policy = policy;
945 return /* rc */; /* hopefully we can use 'rc' eventually */
946}
947
1992a5ed 948#ifdef CONFIG_PM
ca77329f
KCA
949/**
950 * ata_dev_disable_pm - disable SATA interface power management
48166fd9 951 * @dev: device to disable power management
ca77329f
KCA
952 *
953 * Disable SATA Interface power management. This will disable
954 * Device Interface Power Management (DIPM) without changing
955 * policy, call driver specific callbacks for disabling Host
956 * Initiated Power management.
957 *
958 * Locking: Caller.
959 * Returns: void
960 */
961static void ata_dev_disable_pm(struct ata_device *dev)
962{
963 struct ata_port *ap = dev->link->ap;
964
965 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
966 if (ap->ops->disable_pm)
967 ap->ops->disable_pm(ap);
968}
1992a5ed 969#endif /* CONFIG_PM */
ca77329f
KCA
970
971void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
972{
973 ap->pm_policy = policy;
974 ap->link.eh_info.action |= ATA_EHI_LPM;
975 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
976 ata_port_schedule_eh(ap);
977}
978
1992a5ed 979#ifdef CONFIG_PM
ca77329f
KCA
980static void ata_lpm_enable(struct ata_host *host)
981{
982 struct ata_link *link;
983 struct ata_port *ap;
984 struct ata_device *dev;
985 int i;
986
987 for (i = 0; i < host->n_ports; i++) {
988 ap = host->ports[i];
989 ata_port_for_each_link(link, ap) {
990 ata_link_for_each_dev(dev, link)
991 ata_dev_disable_pm(dev);
992 }
993 }
994}
995
996static void ata_lpm_disable(struct ata_host *host)
997{
998 int i;
999
1000 for (i = 0; i < host->n_ports; i++) {
1001 struct ata_port *ap = host->ports[i];
1002 ata_lpm_schedule(ap, ap->pm_policy);
1003 }
1004}
1992a5ed 1005#endif /* CONFIG_PM */
ca77329f
KCA
1006
1007
1da177e4 1008/**
0d5ff566 1009 * ata_devchk - PATA device presence detection
1da177e4
LT
1010 * @ap: ATA channel to examine
1011 * @device: Device to examine (starting at zero)
1012 *
1013 * This technique was originally described in
1014 * Hale Landis's ATADRVR (www.ata-atapi.com), and
1015 * later found its way into the ATA/ATAPI spec.
1016 *
1017 * Write a pattern to the ATA shadow registers,
1018 * and if a device is present, it will respond by
1019 * correctly storing and echoing back the
1020 * ATA shadow register contents.
1021 *
1022 * LOCKING:
1023 * caller.
1024 */
1025
0d5ff566 1026static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
1027{
1028 struct ata_ioports *ioaddr = &ap->ioaddr;
1029 u8 nsect, lbal;
1030
1031 ap->ops->dev_select(ap, device);
1032
0d5ff566
TH
1033 iowrite8(0x55, ioaddr->nsect_addr);
1034 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1035
0d5ff566
TH
1036 iowrite8(0xaa, ioaddr->nsect_addr);
1037 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 1038
0d5ff566
TH
1039 iowrite8(0x55, ioaddr->nsect_addr);
1040 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1041
0d5ff566
TH
1042 nsect = ioread8(ioaddr->nsect_addr);
1043 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
1044
1045 if ((nsect == 0x55) && (lbal == 0xaa))
1046 return 1; /* we found a device */
1047
1048 return 0; /* nothing found */
1049}
1050
1da177e4
LT
1051/**
1052 * ata_dev_classify - determine device type based on ATA-spec signature
1053 * @tf: ATA taskfile register set for device to be identified
1054 *
1055 * Determine from taskfile register contents whether a device is
1056 * ATA or ATAPI, as per "Signature and persistence" section
1057 * of ATA/PI spec (volume 1, sect 5.14).
1058 *
1059 * LOCKING:
1060 * None.
1061 *
1062 * RETURNS:
633273a3
TH
1063 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1064 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1065 */
057ace5e 1066unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1067{
1068 /* Apple's open source Darwin code hints that some devices only
1069 * put a proper signature into the LBA mid/high registers,
1070 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1071 *
1072 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1073 * signatures for ATA and ATAPI devices attached on SerialATA,
1074 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1075 * spec has never mentioned about using different signatures
1076 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1077 * Multiplier specification began to use 0x69/0x96 to identify
1078 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1079 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1080 * 0x69/0x96 shortly and described them as reserved for
1081 * SerialATA.
1082 *
1083 * We follow the current spec and consider that 0x69/0x96
1084 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 1085 */
633273a3 1086 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1087 DPRINTK("found ATA device by sig\n");
1088 return ATA_DEV_ATA;
1089 }
1090
633273a3 1091 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1092 DPRINTK("found ATAPI device by sig\n");
1093 return ATA_DEV_ATAPI;
1094 }
1095
633273a3
TH
1096 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1097 DPRINTK("found PMP device by sig\n");
1098 return ATA_DEV_PMP;
1099 }
1100
1101 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 1102 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
1103 return ATA_DEV_SEMB_UNSUP; /* not yet */
1104 }
1105
1da177e4
LT
1106 DPRINTK("unknown device\n");
1107 return ATA_DEV_UNKNOWN;
1108}
1109
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	/* snapshot the shadow registers; the error (feature) register
	 * carries the diagnostic result */
	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* 0x81 on device 0 indicates device 1 failed, not us */
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		/* all-zero status with an ATA signature means nothing
		 * is actually there */
		class = ATA_DEV_NONE;

	return class;
}
1178
1179/**
6a62a04d 1180 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1181 * @id: IDENTIFY DEVICE results we will examine
1182 * @s: string into which data is output
1183 * @ofs: offset into identify device page
1184 * @len: length of string to return. must be an even number.
1185 *
1186 * The strings in the IDENTIFY DEVICE page are broken up into
1187 * 16-bit chunks. Run through the string, and output each
1188 * 8-bit chunk linearly, regardless of platform.
1189 *
1190 * LOCKING:
1191 * caller.
1192 */
1193
6a62a04d
TH
1194void ata_id_string(const u16 *id, unsigned char *s,
1195 unsigned int ofs, unsigned int len)
1da177e4
LT
1196{
1197 unsigned int c;
1198
1199 while (len > 0) {
1200 c = id[ofs] >> 8;
1201 *s = c;
1202 s++;
1203
1204 c = id[ofs] & 0xff;
1205 *s = c;
1206 s++;
1207
1208 ofs++;
1209 len -= 2;
1210 }
1211}
1212
0e949ff3 1213/**
6a62a04d 1214 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1215 * @id: IDENTIFY DEVICE results we will examine
1216 * @s: string into which data is output
1217 * @ofs: offset into identify device page
1218 * @len: length of string to return. must be an odd number.
1219 *
6a62a04d 1220 * This function is identical to ata_id_string except that it
0e949ff3
TH
1221 * trims trailing spaces and terminates the resulting string with
1222 * null. @len must be actual maximum length (even number) + 1.
1223 *
1224 * LOCKING:
1225 * caller.
1226 */
6a62a04d
TH
1227void ata_id_c_string(const u16 *id, unsigned char *s,
1228 unsigned int ofs, unsigned int len)
0e949ff3
TH
1229{
1230 unsigned char *p;
1231
1232 WARN_ON(!(len & 1));
1233
6a62a04d 1234 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1235
1236 p = s + strnlen(s, len - 1);
1237 while (p > s && p[-1] == ' ')
1238 p--;
1239 *p = '\0';
1240}
0baab86b 1241
db6f8759
TH
1242static u64 ata_id_n_sectors(const u16 *id)
1243{
1244 if (ata_id_has_lba(id)) {
1245 if (ata_id_has_lba48(id))
1246 return ata_id_u64(id, 100);
1247 else
1248 return ata_id_u32(id, 60);
1249 } else {
1250 if (ata_id_current_chs_valid(id))
1251 return ata_id_u32(id, 57);
1252 else
1253 return id[1] * id[3] * id[6];
1254 }
1255}
1256
1e999736
AC
1257static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1258{
1259 u64 sectors = 0;
1260
1261 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1262 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1263 sectors |= (tf->hob_lbal & 0xff) << 24;
1264 sectors |= (tf->lbah & 0xff) << 16;
1265 sectors |= (tf->lbam & 0xff) << 8;
1266 sectors |= (tf->lbal & 0xff);
1267
1268 return ++sectors;
1269}
1270
1271static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1272{
1273 u64 sectors = 0;
1274
1275 sectors |= (tf->device & 0x0f) << 24;
1276 sectors |= (tf->lbah & 0xff) << 16;
1277 sectors |= (tf->lbam & 0xff) << 8;
1278 sectors |= (tf->lbal & 0xff);
1279
1280 return ++sectors;
1281}
1282
1283/**
c728a914
TH
1284 * ata_read_native_max_address - Read native max address
1285 * @dev: target device
1286 * @max_sectors: out parameter for the result native max address
1e999736 1287 *
c728a914
TH
1288 * Perform an LBA48 or LBA28 native size query upon the device in
1289 * question.
1e999736 1290 *
c728a914
TH
1291 * RETURNS:
1292 * 0 on success, -EACCES if command is aborted by the drive.
1293 * -EIO on other errors.
1e999736 1294 */
c728a914 1295static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1296{
c728a914 1297 unsigned int err_mask;
1e999736 1298 struct ata_taskfile tf;
c728a914 1299 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1300
1301 ata_tf_init(dev, &tf);
1302
c728a914 1303 /* always clear all address registers */
1e999736 1304 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1305
c728a914
TH
1306 if (lba48) {
1307 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1308 tf.flags |= ATA_TFLAG_LBA48;
1309 } else
1310 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1311
1e999736 1312 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1313 tf.device |= ATA_LBA;
1314
2b789108 1315 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1316 if (err_mask) {
1317 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1318 "max address (err_mask=0x%x)\n", err_mask);
1319 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1320 return -EACCES;
1321 return -EIO;
1322 }
1e999736 1323
c728a914
TH
1324 if (lba48)
1325 *max_sectors = ata_tf_to_lba48(&tf);
1326 else
1327 *max_sectors = ata_tf_to_lba(&tf);
2dcb407e 1328 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1329 (*max_sectors)--;
c728a914 1330 return 0;
1e999736
AC
1331}
1332
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the address of the last sector, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* bits 47:24 go into the HOB registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 live in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	/* bits 23:0 are common to both command flavors */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1389
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media.  The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  (either no hidden area, or unlocking is
	 * disabled by the ata_ignore_hpa module parameter) */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new capacity */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1485
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
1501
0baab86b 1502
1da177e4
LT
1503/**
1504 * ata_std_dev_select - Select device 0/1 on ATA bus
1505 * @ap: ATA channel to manipulate
1506 * @device: ATA device (numbered from zero) to select
1507 *
1508 * Use the method defined in the ATA specification to
1509 * make either device 0, or device 1, active on the
0baab86b
EF
1510 * ATA channel. Works with both PIO and MMIO.
1511 *
1512 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1513 *
1514 * LOCKING:
1515 * caller.
1516 */
1517
2dcb407e 1518void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1519{
1520 u8 tmp;
1521
1522 if (device == 0)
1523 tmp = ATA_DEVICE_OBS;
1524 else
1525 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1526
0d5ff566 1527 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1528 ata_pause(ap); /* needed; also flushes, for mmio */
1529}
1530
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */
void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	/* wait for the channel to go idle before switching devices */
	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* give ATAPI devices extra settle time after selection */
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
1568
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page (capability, field-validity and transfer-mode words).
 *
 *	LOCKING:
 *	caller.
 */
static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1607
cb95d562
TH
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device.  This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes (word 163):
		 * bits 2:0 advanced PIO, bits 5:3 advanced DMA.
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);	/* PIO5 */
		if (pio > 1)
			pio_mask |= (1 << 6);	/* PIO6 */
		if (dma)
			mwdma_mask |= (1 << 3);	/* MWDMA3 */
		if (dma > 1)
			mwdma_mask |= (1 << 4);	/* MWDMA4 */
	}

	udma_mask = 0;
	/* Word 53 bit 2 indicates the UDMA modes word is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1676
86e45b6b 1677/**
442eacc3 1678 * ata_pio_queue_task - Queue port_task
86e45b6b 1679 * @ap: The ata_port to queue port_task for
e2a7f77a 1680 * @fn: workqueue function to be scheduled
65f27f38 1681 * @data: data for @fn to use
e2a7f77a 1682 * @delay: delay time for workqueue function
86e45b6b
TH
1683 *
1684 * Schedule @fn(@data) for execution after @delay jiffies using
1685 * port_task. There is one port_task per port and it's the
1686 * user(low level driver)'s responsibility to make sure that only
1687 * one task is active at any given time.
1688 *
1689 * libata core layer takes care of synchronization between
442eacc3 1690 * port_task and EH. ata_pio_queue_task() may be ignored for EH
86e45b6b
TH
1691 * synchronization.
1692 *
1693 * LOCKING:
1694 * Inherited from caller.
1695 */
442eacc3
JG
1696static void ata_pio_queue_task(struct ata_port *ap, void *data,
1697 unsigned long delay)
86e45b6b 1698{
65f27f38 1699 ap->port_task_data = data;
86e45b6b 1700
45a66c1c
ON
1701 /* may fail if ata_port_flush_task() in progress */
1702 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1703}
1704
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancel pending work and wait out a self-requeueing run */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1724
7102d230 1725static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1726{
77853bf2 1727 struct completion *waiting = qc->private_data;
a2a7a662 1728
a2a7a662 1729 complete(waiting);
a2a7a662
TH
1730}
1731
1732/**
2432697b 1733 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1734 * @dev: Device to which the command is sent
1735 * @tf: Taskfile registers for the command and the result
d69cf37d 1736 * @cdb: CDB for packet command
a2a7a662 1737 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1738 * @sgl: sg list for the data buffer of the command
2432697b 1739 * @n_elem: Number of sg entries
2b789108 1740 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1741 *
1742 * Executes libata internal command with timeout. @tf contains
1743 * command on entry and result on return. Timeout and error
1744 * conditions are reported via return value. No recovery action
1745 * is taken after a command times out. It's caller's duty to
1746 * clean up after timeout.
1747 *
1748 * LOCKING:
1749 * None. Should be called with kernel context, might sleep.
551e8889
TH
1750 *
1751 * RETURNS:
1752 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1753 */
2432697b
TH
1754unsigned ata_exec_internal_sg(struct ata_device *dev,
1755 struct ata_taskfile *tf, const u8 *cdb,
87260216 1756 int dma_dir, struct scatterlist *sgl,
2b789108 1757 unsigned int n_elem, unsigned long timeout)
a2a7a662 1758{
9af5c9c9
TH
1759 struct ata_link *link = dev->link;
1760 struct ata_port *ap = link->ap;
a2a7a662
TH
1761 u8 command = tf->command;
1762 struct ata_queued_cmd *qc;
2ab7db1f 1763 unsigned int tag, preempted_tag;
dedaf2b0 1764 u32 preempted_sactive, preempted_qc_active;
da917d69 1765 int preempted_nr_active_links;
60be6b9a 1766 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1767 unsigned long flags;
77853bf2 1768 unsigned int err_mask;
d95a717f 1769 int rc;
a2a7a662 1770
ba6a1308 1771 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1772
e3180499 1773 /* no internal command while frozen */
b51e9e5d 1774 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1775 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1776 return AC_ERR_SYSTEM;
1777 }
1778
2ab7db1f 1779 /* initialize internal qc */
a2a7a662 1780
2ab7db1f
TH
1781 /* XXX: Tag 0 is used for drivers with legacy EH as some
1782 * drivers choke if any other tag is given. This breaks
1783 * ata_tag_internal() test for those drivers. Don't use new
1784 * EH stuff without converting to it.
1785 */
1786 if (ap->ops->error_handler)
1787 tag = ATA_TAG_INTERNAL;
1788 else
1789 tag = 0;
1790
6cec4a39 1791 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1792 BUG();
f69499f4 1793 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1794
1795 qc->tag = tag;
1796 qc->scsicmd = NULL;
1797 qc->ap = ap;
1798 qc->dev = dev;
1799 ata_qc_reinit(qc);
1800
9af5c9c9
TH
1801 preempted_tag = link->active_tag;
1802 preempted_sactive = link->sactive;
dedaf2b0 1803 preempted_qc_active = ap->qc_active;
da917d69 1804 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1805 link->active_tag = ATA_TAG_POISON;
1806 link->sactive = 0;
dedaf2b0 1807 ap->qc_active = 0;
da917d69 1808 ap->nr_active_links = 0;
2ab7db1f
TH
1809
1810 /* prepare & issue qc */
a2a7a662 1811 qc->tf = *tf;
d69cf37d
TH
1812 if (cdb)
1813 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1814 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1815 qc->dma_dir = dma_dir;
1816 if (dma_dir != DMA_NONE) {
2432697b 1817 unsigned int i, buflen = 0;
87260216 1818 struct scatterlist *sg;
2432697b 1819
87260216
JA
1820 for_each_sg(sgl, sg, n_elem, i)
1821 buflen += sg->length;
2432697b 1822
87260216 1823 ata_sg_init(qc, sgl, n_elem);
49c80429 1824 qc->nbytes = buflen;
a2a7a662
TH
1825 }
1826
77853bf2 1827 qc->private_data = &wait;
a2a7a662
TH
1828 qc->complete_fn = ata_qc_complete_internal;
1829
8e0e694a 1830 ata_qc_issue(qc);
a2a7a662 1831
ba6a1308 1832 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1833
2b789108
TH
1834 if (!timeout)
1835 timeout = ata_probe_timeout * 1000 / HZ;
1836
1837 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f
TH
1838
1839 ata_port_flush_task(ap);
41ade50c 1840
d95a717f 1841 if (!rc) {
ba6a1308 1842 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1843
1844 /* We're racing with irq here. If we lose, the
1845 * following test prevents us from completing the qc
d95a717f
TH
1846 * twice. If we win, the port is frozen and will be
1847 * cleaned up by ->post_internal_cmd().
a2a7a662 1848 */
77853bf2 1849 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1850 qc->err_mask |= AC_ERR_TIMEOUT;
1851
1852 if (ap->ops->error_handler)
1853 ata_port_freeze(ap);
1854 else
1855 ata_qc_complete(qc);
f15a1daf 1856
0dd4b21f
BP
1857 if (ata_msg_warn(ap))
1858 ata_dev_printk(dev, KERN_WARNING,
88574551 1859 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1860 }
1861
ba6a1308 1862 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1863 }
1864
d95a717f
TH
1865 /* do post_internal_cmd */
1866 if (ap->ops->post_internal_cmd)
1867 ap->ops->post_internal_cmd(qc);
1868
a51d644a
TH
1869 /* perform minimal error analysis */
1870 if (qc->flags & ATA_QCFLAG_FAILED) {
1871 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1872 qc->err_mask |= AC_ERR_DEV;
1873
1874 if (!qc->err_mask)
1875 qc->err_mask |= AC_ERR_OTHER;
1876
1877 if (qc->err_mask & ~AC_ERR_OTHER)
1878 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1879 }
1880
15869303 1881 /* finish up */
ba6a1308 1882 spin_lock_irqsave(ap->lock, flags);
15869303 1883
e61e0672 1884 *tf = qc->result_tf;
77853bf2
TH
1885 err_mask = qc->err_mask;
1886
1887 ata_qc_free(qc);
9af5c9c9
TH
1888 link->active_tag = preempted_tag;
1889 link->sactive = preempted_sactive;
dedaf2b0 1890 ap->qc_active = preempted_qc_active;
da917d69 1891 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1892
1f7dd3e9
TH
1893 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1894 * Until those drivers are fixed, we detect the condition
1895 * here, fail the command with AC_ERR_SYSTEM and reenable the
1896 * port.
1897 *
1898 * Note that this doesn't change any behavior as internal
1899 * command failure results in disabling the device in the
1900 * higher layer for LLDDs without new reset/EH callbacks.
1901 *
1902 * Kill the following code as soon as those drivers are fixed.
1903 */
198e0fed 1904 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1905 err_mask |= AC_ERR_SYSTEM;
1906 ata_port_probe(ap);
1907 }
1908
ba6a1308 1909 spin_unlock_irqrestore(ap->lock, flags);
15869303 1910
77853bf2 1911 return err_mask;
a2a7a662
TH
1912}
1913
2432697b 1914/**
33480a0e 1915 * ata_exec_internal - execute libata internal command
2432697b
TH
1916 * @dev: Device to which the command is sent
1917 * @tf: Taskfile registers for the command and the result
1918 * @cdb: CDB for packet command
1919 * @dma_dir: Data tranfer direction of the command
1920 * @buf: Data buffer of the command
1921 * @buflen: Length of data buffer
2b789108 1922 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1923 *
1924 * Wrapper around ata_exec_internal_sg() which takes simple
1925 * buffer instead of sg list.
1926 *
1927 * LOCKING:
1928 * None. Should be called with kernel context, might sleep.
1929 *
1930 * RETURNS:
1931 * Zero on success, AC_ERR_* mask on failure
1932 */
1933unsigned ata_exec_internal(struct ata_device *dev,
1934 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1935 int dma_dir, void *buf, unsigned int buflen,
1936 unsigned long timeout)
2432697b 1937{
33480a0e
TH
1938 struct scatterlist *psg = NULL, sg;
1939 unsigned int n_elem = 0;
2432697b 1940
33480a0e
TH
1941 if (dma_dir != DMA_NONE) {
1942 WARN_ON(!buf);
1943 sg_init_one(&sg, buf, buflen);
1944 psg = &sg;
1945 n_elem++;
1946 }
2432697b 1947
2b789108
TH
1948 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1949 timeout);
2432697b
TH
1950}
1951
977e6b9f
TH
1952/**
1953 * ata_do_simple_cmd - execute simple internal command
1954 * @dev: Device to which the command is sent
1955 * @cmd: Opcode to execute
1956 *
1957 * Execute a 'simple' command, that only consists of the opcode
1958 * 'cmd' itself, without filling any other registers
1959 *
1960 * LOCKING:
1961 * Kernel thread context (may sleep).
1962 *
1963 * RETURNS:
1964 * Zero on success, AC_ERR_* mask on failure
e58eb583 1965 */
77b08fb5 1966unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1967{
1968 struct ata_taskfile tf;
e58eb583
TH
1969
1970 ata_tf_init(dev, &tf);
1971
1972 tf.command = cmd;
1973 tf.flags |= ATA_TFLAG_DEVICE;
1974 tf.protocol = ATA_PROT_NODATA;
1975
2b789108 1976 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1977}
1978
1bc4ccff
AC
1979/**
1980 * ata_pio_need_iordy - check if iordy needed
1981 * @adev: ATA device
1982 *
1983 * Check if the current speed of the device requires IORDY. Used
1984 * by various controllers for chip configuration.
1985 */
a617c09f 1986
1bc4ccff
AC
1987unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1988{
432729f0
AC
1989 /* Controller doesn't support IORDY. Probably a pointless check
1990 as the caller should know this */
9af5c9c9 1991 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1992 return 0;
432729f0
AC
1993 /* PIO3 and higher it is mandatory */
1994 if (adev->pio_mode > XFER_PIO_2)
1995 return 1;
1996 /* We turn it on when possible */
1997 if (ata_id_has_iordy(adev->id))
1bc4ccff 1998 return 1;
432729f0
AC
1999 return 0;
2000}
2e9edbf8 2001
432729f0
AC
2002/**
2003 * ata_pio_mask_no_iordy - Return the non IORDY mask
2004 * @adev: ATA device
2005 *
2006 * Compute the highest mode possible if we are not using iordy. Return
2007 * -1 if no iordy mode is available.
2008 */
a617c09f 2009
432729f0
AC
2010static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2011{
1bc4ccff 2012 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 2013 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 2014 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
2015 /* Is the speed faster than the drive allows non IORDY ? */
2016 if (pio) {
2017 /* This is cycle times not frequency - watch the logic! */
2018 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
2019 return 3 << ATA_SHIFT_PIO;
2020 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
2021 }
2022 }
432729f0 2023 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
2024}
2025
1da177e4 2026/**
49016aca 2027 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
2028 * @dev: target device
2029 * @p_class: pointer to class of the target device (may be changed)
bff04647 2030 * @flags: ATA_READID_* flags
fe635c7e 2031 * @id: buffer to read IDENTIFY data into
1da177e4 2032 *
49016aca
TH
2033 * Read ID data from the specified device. ATA_CMD_ID_ATA is
2034 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
2035 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
2036 * for pre-ATA4 drives.
1da177e4 2037 *
50a99018 2038 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 2039 * now we abort if we hit that case.
50a99018 2040 *
1da177e4 2041 * LOCKING:
49016aca
TH
2042 * Kernel thread context (may sleep)
2043 *
2044 * RETURNS:
2045 * 0 on success, -errno otherwise.
1da177e4 2046 */
a9beec95 2047int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 2048 unsigned int flags, u16 *id)
1da177e4 2049{
9af5c9c9 2050 struct ata_port *ap = dev->link->ap;
49016aca 2051 unsigned int class = *p_class;
a0123703 2052 struct ata_taskfile tf;
49016aca
TH
2053 unsigned int err_mask = 0;
2054 const char *reason;
54936f8b 2055 int may_fallback = 1, tried_spinup = 0;
49016aca 2056 int rc;
1da177e4 2057
0dd4b21f 2058 if (ata_msg_ctl(ap))
44877b4e 2059 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 2060
49016aca 2061 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 2062 retry:
3373efd8 2063 ata_tf_init(dev, &tf);
a0123703 2064
49016aca
TH
2065 switch (class) {
2066 case ATA_DEV_ATA:
a0123703 2067 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
2068 break;
2069 case ATA_DEV_ATAPI:
a0123703 2070 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
2071 break;
2072 default:
2073 rc = -ENODEV;
2074 reason = "unsupported class";
2075 goto err_out;
1da177e4
LT
2076 }
2077
a0123703 2078 tf.protocol = ATA_PROT_PIO;
81afe893
TH
2079
2080 /* Some devices choke if TF registers contain garbage. Make
2081 * sure those are properly initialized.
2082 */
2083 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2084
2085 /* Device presence detection is unreliable on some
2086 * controllers. Always poll IDENTIFY if available.
2087 */
2088 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 2089
3373efd8 2090 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 2091 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 2092 if (err_mask) {
800b3996 2093 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 2094 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 2095 ap->print_id, dev->devno);
55a8e2c8
TH
2096 return -ENOENT;
2097 }
2098
54936f8b
TH
2099 /* Device or controller might have reported the wrong
2100 * device class. Give a shot at the other IDENTIFY if
2101 * the current one is aborted by the device.
2102 */
2103 if (may_fallback &&
2104 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2105 may_fallback = 0;
2106
2107 if (class == ATA_DEV_ATA)
2108 class = ATA_DEV_ATAPI;
2109 else
2110 class = ATA_DEV_ATA;
2111 goto retry;
2112 }
2113
49016aca
TH
2114 rc = -EIO;
2115 reason = "I/O error";
1da177e4
LT
2116 goto err_out;
2117 }
2118
54936f8b
TH
2119 /* Falling back doesn't make sense if ID data was read
2120 * successfully at least once.
2121 */
2122 may_fallback = 0;
2123
49016aca 2124 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 2125
49016aca 2126 /* sanity check */
a4f5749b 2127 rc = -EINVAL;
6070068b 2128 reason = "device reports invalid type";
a4f5749b
TH
2129
2130 if (class == ATA_DEV_ATA) {
2131 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2132 goto err_out;
2133 } else {
2134 if (ata_id_is_ata(id))
2135 goto err_out;
49016aca
TH
2136 }
2137
169439c2
ML
2138 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2139 tried_spinup = 1;
2140 /*
2141 * Drive powered-up in standby mode, and requires a specific
2142 * SET_FEATURES spin-up subcommand before it will accept
2143 * anything other than the original IDENTIFY command.
2144 */
218f3d30 2145 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 2146 if (err_mask && id[2] != 0x738c) {
169439c2
ML
2147 rc = -EIO;
2148 reason = "SPINUP failed";
2149 goto err_out;
2150 }
2151 /*
2152 * If the drive initially returned incomplete IDENTIFY info,
2153 * we now must reissue the IDENTIFY command.
2154 */
2155 if (id[2] == 0x37c8)
2156 goto retry;
2157 }
2158
bff04647 2159 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
2160 /*
2161 * The exact sequence expected by certain pre-ATA4 drives is:
2162 * SRST RESET
50a99018
AC
2163 * IDENTIFY (optional in early ATA)
2164 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2165 * anything else..
2166 * Some drives were very specific about that exact sequence.
50a99018
AC
2167 *
2168 * Note that ATA4 says lba is mandatory so the second check
2169 * shoud never trigger.
49016aca
TH
2170 */
2171 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2172 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2173 if (err_mask) {
2174 rc = -EIO;
2175 reason = "INIT_DEV_PARAMS failed";
2176 goto err_out;
2177 }
2178
2179 /* current CHS translation info (id[53-58]) might be
2180 * changed. reread the identify device info.
2181 */
bff04647 2182 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2183 goto retry;
2184 }
2185 }
2186
2187 *p_class = class;
fe635c7e 2188
49016aca
TH
2189 return 0;
2190
2191 err_out:
88574551 2192 if (ata_msg_warn(ap))
0dd4b21f 2193 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2194 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2195 return rc;
2196}
2197
3373efd8 2198static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2199{
9af5c9c9
TH
2200 struct ata_port *ap = dev->link->ap;
2201 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2202}
2203
a6e6ce8e
TH
2204static void ata_dev_config_ncq(struct ata_device *dev,
2205 char *desc, size_t desc_sz)
2206{
9af5c9c9 2207 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2208 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2209
2210 if (!ata_id_has_ncq(dev->id)) {
2211 desc[0] = '\0';
2212 return;
2213 }
75683fe7 2214 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2215 snprintf(desc, desc_sz, "NCQ (not used)");
2216 return;
2217 }
a6e6ce8e 2218 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2219 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2220 dev->flags |= ATA_DFLAG_NCQ;
2221 }
2222
2223 if (hdepth >= ddepth)
2224 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2225 else
2226 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2227}
2228
49016aca 2229/**
ffeae418 2230 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2231 * @dev: Target device to configure
2232 *
2233 * Configure @dev according to @dev->id. Generic and low-level
2234 * driver specific fixups are also applied.
49016aca
TH
2235 *
2236 * LOCKING:
ffeae418
TH
2237 * Kernel thread context (may sleep)
2238 *
2239 * RETURNS:
2240 * 0 on success, -errno otherwise
49016aca 2241 */
efdaedc4 2242int ata_dev_configure(struct ata_device *dev)
49016aca 2243{
9af5c9c9
TH
2244 struct ata_port *ap = dev->link->ap;
2245 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2246 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2247 const u16 *id = dev->id;
7dc951ae 2248 unsigned long xfer_mask;
b352e57d 2249 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2250 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2251 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2252 int rc;
49016aca 2253
0dd4b21f 2254 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
2255 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2256 __FUNCTION__);
ffeae418 2257 return 0;
49016aca
TH
2258 }
2259
0dd4b21f 2260 if (ata_msg_probe(ap))
44877b4e 2261 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 2262
75683fe7
TH
2263 /* set horkage */
2264 dev->horkage |= ata_dev_blacklisted(dev);
33267325 2265 ata_force_horkage(dev);
75683fe7 2266
6746544c
TH
2267 /* let ACPI work its magic */
2268 rc = ata_acpi_on_devcfg(dev);
2269 if (rc)
2270 return rc;
08573a86 2271
05027adc
TH
2272 /* massage HPA, do it early as it might change IDENTIFY data */
2273 rc = ata_hpa_resize(dev);
2274 if (rc)
2275 return rc;
2276
c39f5ebe 2277 /* print device capabilities */
0dd4b21f 2278 if (ata_msg_probe(ap))
88574551
TH
2279 ata_dev_printk(dev, KERN_DEBUG,
2280 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2281 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 2282 __FUNCTION__,
f15a1daf
TH
2283 id[49], id[82], id[83], id[84],
2284 id[85], id[86], id[87], id[88]);
c39f5ebe 2285
208a9933 2286 /* initialize to-be-configured parameters */
ea1dd4e1 2287 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2288 dev->max_sectors = 0;
2289 dev->cdb_len = 0;
2290 dev->n_sectors = 0;
2291 dev->cylinders = 0;
2292 dev->heads = 0;
2293 dev->sectors = 0;
2294
1da177e4
LT
2295 /*
2296 * common ATA, ATAPI feature tests
2297 */
2298
ff8854b2 2299 /* find max transfer mode; for printk only */
1148c3a7 2300 xfer_mask = ata_id_xfermask(id);
1da177e4 2301
0dd4b21f
BP
2302 if (ata_msg_probe(ap))
2303 ata_dump_id(id);
1da177e4 2304
ef143d57
AL
2305 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2306 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2307 sizeof(fwrevbuf));
2308
2309 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2310 sizeof(modelbuf));
2311
1da177e4
LT
2312 /* ATA-specific feature tests */
2313 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
2314 if (ata_id_is_cfa(id)) {
2315 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
2316 ata_dev_printk(dev, KERN_WARNING,
2317 "supports DRM functions and may "
2318 "not be fully accessable.\n");
b352e57d 2319 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2320 } else {
2dcb407e 2321 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2322 /* Warn the user if the device has TPM extensions */
2323 if (ata_id_has_tpm(id))
2324 ata_dev_printk(dev, KERN_WARNING,
2325 "supports DRM functions and may "
2326 "not be fully accessable.\n");
2327 }
b352e57d 2328
1148c3a7 2329 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2330
3f64f565
EM
2331 if (dev->id[59] & 0x100)
2332 dev->multi_count = dev->id[59] & 0xff;
2333
1148c3a7 2334 if (ata_id_has_lba(id)) {
4c2d721a 2335 const char *lba_desc;
a6e6ce8e 2336 char ncq_desc[20];
8bf62ece 2337
4c2d721a
TH
2338 lba_desc = "LBA";
2339 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2340 if (ata_id_has_lba48(id)) {
8bf62ece 2341 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2342 lba_desc = "LBA48";
6fc49adb
TH
2343
2344 if (dev->n_sectors >= (1UL << 28) &&
2345 ata_id_has_flush_ext(id))
2346 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2347 }
8bf62ece 2348
a6e6ce8e
TH
2349 /* config NCQ */
2350 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2351
8bf62ece 2352 /* print device info to dmesg */
3f64f565
EM
2353 if (ata_msg_drv(ap) && print_info) {
2354 ata_dev_printk(dev, KERN_INFO,
2355 "%s: %s, %s, max %s\n",
2356 revbuf, modelbuf, fwrevbuf,
2357 ata_mode_string(xfer_mask));
2358 ata_dev_printk(dev, KERN_INFO,
2359 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 2360 (unsigned long long)dev->n_sectors,
3f64f565
EM
2361 dev->multi_count, lba_desc, ncq_desc);
2362 }
ffeae418 2363 } else {
8bf62ece
AL
2364 /* CHS */
2365
2366 /* Default translation */
1148c3a7
TH
2367 dev->cylinders = id[1];
2368 dev->heads = id[3];
2369 dev->sectors = id[6];
8bf62ece 2370
1148c3a7 2371 if (ata_id_current_chs_valid(id)) {
8bf62ece 2372 /* Current CHS translation is valid. */
1148c3a7
TH
2373 dev->cylinders = id[54];
2374 dev->heads = id[55];
2375 dev->sectors = id[56];
8bf62ece
AL
2376 }
2377
2378 /* print device info to dmesg */
3f64f565 2379 if (ata_msg_drv(ap) && print_info) {
88574551 2380 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2381 "%s: %s, %s, max %s\n",
2382 revbuf, modelbuf, fwrevbuf,
2383 ata_mode_string(xfer_mask));
a84471fe 2384 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2385 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2386 (unsigned long long)dev->n_sectors,
2387 dev->multi_count, dev->cylinders,
2388 dev->heads, dev->sectors);
2389 }
07f6f7d0
AL
2390 }
2391
6e7846e9 2392 dev->cdb_len = 16;
1da177e4
LT
2393 }
2394
2395 /* ATAPI-specific feature tests */
2c13b7ce 2396 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2397 const char *cdb_intr_string = "";
2398 const char *atapi_an_string = "";
91163006 2399 const char *dma_dir_string = "";
7d77b247 2400 u32 sntf;
08a556db 2401
1148c3a7 2402 rc = atapi_cdb_len(id);
1da177e4 2403 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2404 if (ata_msg_warn(ap))
88574551
TH
2405 ata_dev_printk(dev, KERN_WARNING,
2406 "unsupported CDB len\n");
ffeae418 2407 rc = -EINVAL;
1da177e4
LT
2408 goto err_out_nosup;
2409 }
6e7846e9 2410 dev->cdb_len = (unsigned int) rc;
1da177e4 2411
7d77b247
TH
2412 /* Enable ATAPI AN if both the host and device have
2413 * the support. If PMP is attached, SNTF is required
2414 * to enable ATAPI AN to discern between PHY status
2415 * changed notifications and ATAPI ANs.
9f45cbd3 2416 */
7d77b247
TH
2417 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2418 (!ap->nr_pmp_links ||
2419 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2420 unsigned int err_mask;
2421
9f45cbd3 2422 /* issue SET feature command to turn this on */
218f3d30
JG
2423 err_mask = ata_dev_set_feature(dev,
2424 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2425 if (err_mask)
9f45cbd3 2426 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2427 "failed to enable ATAPI AN "
2428 "(err_mask=0x%x)\n", err_mask);
2429 else {
9f45cbd3 2430 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2431 atapi_an_string = ", ATAPI AN";
2432 }
9f45cbd3
KCA
2433 }
2434
08a556db 2435 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2436 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2437 cdb_intr_string = ", CDB intr";
2438 }
312f7da2 2439
91163006
TH
2440 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2441 dev->flags |= ATA_DFLAG_DMADIR;
2442 dma_dir_string = ", DMADIR";
2443 }
2444
1da177e4 2445 /* print device info to dmesg */
5afc8142 2446 if (ata_msg_drv(ap) && print_info)
ef143d57 2447 ata_dev_printk(dev, KERN_INFO,
91163006 2448 "ATAPI: %s, %s, max %s%s%s%s\n",
ef143d57 2449 modelbuf, fwrevbuf,
12436c30 2450 ata_mode_string(xfer_mask),
91163006
TH
2451 cdb_intr_string, atapi_an_string,
2452 dma_dir_string);
1da177e4
LT
2453 }
2454
914ed354
TH
2455 /* determine max_sectors */
2456 dev->max_sectors = ATA_MAX_SECTORS;
2457 if (dev->flags & ATA_DFLAG_LBA48)
2458 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2459
ca77329f
KCA
2460 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2461 if (ata_id_has_hipm(dev->id))
2462 dev->flags |= ATA_DFLAG_HIPM;
2463 if (ata_id_has_dipm(dev->id))
2464 dev->flags |= ATA_DFLAG_DIPM;
2465 }
2466
c5038fc0
AC
2467 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2468 200 sectors */
3373efd8 2469 if (ata_dev_knobble(dev)) {
5afc8142 2470 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2471 ata_dev_printk(dev, KERN_INFO,
2472 "applying bridge limits\n");
5a529139 2473 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2474 dev->max_sectors = ATA_MAX_SECTORS;
2475 }
2476
f8d8e579 2477 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2478 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2479 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2480 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2481 }
f8d8e579 2482
75683fe7 2483 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2484 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2485 dev->max_sectors);
18d6e9d5 2486
ca77329f
KCA
2487 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2488 dev->horkage |= ATA_HORKAGE_IPM;
2489
2490 /* reset link pm_policy for this port to no pm */
2491 ap->pm_policy = MAX_PERFORMANCE;
2492 }
2493
4b2f3ede 2494 if (ap->ops->dev_config)
cd0d3bbc 2495 ap->ops->dev_config(dev);
4b2f3ede 2496
c5038fc0
AC
2497 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2498 /* Let the user know. We don't want to disallow opens for
2499 rescue purposes, or in case the vendor is just a blithering
2500 idiot. Do this after the dev_config call as some controllers
2501 with buggy firmware may want to avoid reporting false device
2502 bugs */
2503
2504 if (print_info) {
2505 ata_dev_printk(dev, KERN_WARNING,
2506"Drive reports diagnostics failure. This may indicate a drive\n");
2507 ata_dev_printk(dev, KERN_WARNING,
2508"fault or invalid emulation. Contact drive vendor for information.\n");
2509 }
2510 }
2511
0dd4b21f
BP
2512 if (ata_msg_probe(ap))
2513 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2514 __FUNCTION__, ata_chk_status(ap));
ffeae418 2515 return 0;
1da177e4
LT
2516
2517err_out_nosup:
0dd4b21f 2518 if (ata_msg_probe(ap))
88574551
TH
2519 ata_dev_printk(dev, KERN_DEBUG,
2520 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2521 return rc;
1da177e4
LT
2522}
2523
be0d18df 2524/**
2e41e8e6 2525 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2526 * @ap: port
2527 *
2e41e8e6 2528 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2529 * detection.
2530 */
2531
int ata_cable_40wire(struct ata_port *ap)
{
	/* Unconditionally report a 40-wire PATA cable. */
	return ATA_CBL_PATA40;
}
2536
2537/**
2e41e8e6 2538 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2539 * @ap: port
2540 *
2e41e8e6 2541 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2542 * detection.
2543 */
2544
int ata_cable_80wire(struct ata_port *ap)
{
	/* Unconditionally report an 80-wire PATA cable. */
	return ATA_CBL_PATA80;
}
2549
2550/**
2551 * ata_cable_unknown - return unknown PATA cable.
2552 * @ap: port
2553 *
2554 * Helper method for drivers which have no PATA cable detection.
2555 */
2556
int ata_cable_unknown(struct ata_port *ap)
{
	/* Cable type cannot be determined on this hardware. */
	return ATA_CBL_PATA_UNK;
}
2561
c88f90c3
TH
2562/**
2563 * ata_cable_ignore - return ignored PATA cable.
2564 * @ap: port
2565 *
2566 * Helper method for drivers which don't use cable type to limit
2567 * transfer mode.
2568 */
int ata_cable_ignore(struct ata_port *ap)
{
	/* Cable type is irrelevant for this driver's mode selection. */
	return ATA_CBL_PATA_IGN;
}
2573
be0d18df
AC
2574/**
2575 * ata_cable_sata - return SATA cable type
2576 * @ap: port
2577 *
2578 * Helper method for drivers which have SATA cables
2579 */
2580
int ata_cable_sata(struct ata_port *ap)
{
	/* Unconditionally report a SATA cable. */
	return ATA_CBL_SATA;
}
2585
1da177e4
LT
2586/**
2587 * ata_bus_probe - Reset and probe ATA bus
2588 * @ap: Bus to probe
2589 *
0cba632b
JG
2590 * Master ATA bus probing function. Initiates a hardware-dependent
2591 * bus reset, then attempts to identify any devices found on
2592 * the bus.
2593 *
1da177e4 2594 * LOCKING:
0cba632b 2595 * PCI/etc. bus probe sem.
1da177e4
LT
2596 *
2597 * RETURNS:
96072e69 2598 * Zero on success, negative errno otherwise.
1da177e4
LT
2599 */
2600
int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];	/* per-device retry budget */
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	/* Re-entered from the fail path below until every device either
	 * probes successfully or runs out of tries.
	 */
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have disabled the port; re-enable it */
	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device made it through */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* 'dev' is the device whose probe step failed above */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through: -ENODEV also gets the last-chance
		 * slow-down treatment below */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2734
2735/**
0cba632b
JG
2736 * ata_port_probe - Mark port as enabled
2737 * @ap: Port for which we indicate enablement
1da177e4 2738 *
0cba632b
JG
2739 * Modify @ap data structure such that the system
2740 * thinks that the entire port is enabled.
2741 *
cca3974e 2742 * LOCKING: host lock, or some other form of
0cba632b 2743 * serialization.
1da177e4
LT
2744 */
2745
void ata_port_probe(struct ata_port *ap)
{
	/* Clear the DISABLED flag so the port is treated as enabled. */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2750
3be680b7
TH
2751/**
2752 * sata_print_link_status - Print SATA link status
936fd732 2753 * @link: SATA link to printk link status about
3be680b7
TH
2754 *
2755 * This function prints link speed and status of a SATA link.
2756 *
2757 * LOCKING:
2758 * None.
2759 */
936fd732 2760void sata_print_link_status(struct ata_link *link)
3be680b7 2761{
6d5f9732 2762 u32 sstatus, scontrol, tmp;
3be680b7 2763
936fd732 2764 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2765 return;
936fd732 2766 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2767
936fd732 2768 if (ata_link_online(link)) {
3be680b7 2769 tmp = (sstatus >> 4) & 0xf;
936fd732 2770 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2771 "SATA link up %s (SStatus %X SControl %X)\n",
2772 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2773 } else {
936fd732 2774 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2775 "SATA link down (SStatus %X SControl %X)\n",
2776 sstatus, scontrol);
3be680b7
TH
2777 }
2778}
2779
ebdfca6e
AC
2780/**
2781 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2782 * @adev: device
2783 *
2784 * Obtain the other device on the same cable, or if none is
2785 * present NULL is returned
2786 */
2e9edbf8 2787
3373efd8 2788struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2789{
9af5c9c9
TH
2790 struct ata_link *link = adev->link;
2791 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2792 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2793 return NULL;
2794 return pair;
2795}
2796
1da177e4 2797/**
780a87f7
JG
2798 * ata_port_disable - Disable port.
2799 * @ap: Port to be disabled.
1da177e4 2800 *
780a87f7
JG
2801 * Modify @ap data structure such that the system
2802 * thinks that the entire port is disabled, and should
2803 * never attempt to probe or communicate with devices
2804 * on this port.
2805 *
cca3974e 2806 * LOCKING: host lock, or some other form of
780a87f7 2807 * serialization.
1da177e4
LT
2808 */
2809
void ata_port_disable(struct ata_port *ap)
{
	/* Mark both possible devices on the link as absent... */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	/* ...and flag the whole port as disabled. */
	ap->flags |= ATA_FLAG_DISABLED;
}
2816
1c3fae4d 2817/**
3c567b7d 2818 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2819 * @link: Link to adjust SATA spd limit for
1c3fae4d 2820 *
936fd732 2821 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2822 * function only adjusts the limit. The change must be applied
3c567b7d 2823 * using sata_set_spd().
1c3fae4d
TH
2824 *
2825 * LOCKING:
2826 * Inherited from caller.
2827 *
2828 * RETURNS:
2829 * 0 on success, negative errno on failure
2830 */
936fd732 2831int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2832{
81952c54
TH
2833 u32 sstatus, spd, mask;
2834 int rc, highbit;
1c3fae4d 2835
936fd732 2836 if (!sata_scr_valid(link))
008a7896
TH
2837 return -EOPNOTSUPP;
2838
2839 /* If SCR can be read, use it to determine the current SPD.
936fd732 2840 * If not, use cached value in link->sata_spd.
008a7896 2841 */
936fd732 2842 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2843 if (rc == 0)
2844 spd = (sstatus >> 4) & 0xf;
2845 else
936fd732 2846 spd = link->sata_spd;
1c3fae4d 2847
936fd732 2848 mask = link->sata_spd_limit;
1c3fae4d
TH
2849 if (mask <= 1)
2850 return -EINVAL;
008a7896
TH
2851
2852 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2853 highbit = fls(mask) - 1;
2854 mask &= ~(1 << highbit);
2855
008a7896
TH
2856 /* Mask off all speeds higher than or equal to the current
2857 * one. Force 1.5Gbps if current SPD is not available.
2858 */
2859 if (spd > 1)
2860 mask &= (1 << (spd - 1)) - 1;
2861 else
2862 mask &= 1;
2863
2864 /* were we already at the bottom? */
1c3fae4d
TH
2865 if (!mask)
2866 return -EINVAL;
2867
936fd732 2868 link->sata_spd_limit = mask;
1c3fae4d 2869
936fd732 2870 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2871 sata_spd_string(fls(mask)));
1c3fae4d
TH
2872
2873 return 0;
2874}
2875
936fd732 2876static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2877{
5270222f
TH
2878 struct ata_link *host_link = &link->ap->link;
2879 u32 limit, target, spd;
1c3fae4d 2880
5270222f
TH
2881 limit = link->sata_spd_limit;
2882
2883 /* Don't configure downstream link faster than upstream link.
2884 * It doesn't speed up anything and some PMPs choke on such
2885 * configuration.
2886 */
2887 if (!ata_is_host_link(link) && host_link->sata_spd)
2888 limit &= (1 << host_link->sata_spd) - 1;
2889
2890 if (limit == UINT_MAX)
2891 target = 0;
1c3fae4d 2892 else
5270222f 2893 target = fls(limit);
1c3fae4d
TH
2894
2895 spd = (*scontrol >> 4) & 0xf;
5270222f 2896 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2897
5270222f 2898 return spd != target;
1c3fae4d
TH
2899}
2900
2901/**
3c567b7d 2902 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2903 * @link: Link in question
1c3fae4d
TH
2904 *
2905 * Test whether the spd limit in SControl matches
936fd732 2906 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2907 * whether hardreset is necessary to apply SATA spd
2908 * configuration.
2909 *
2910 * LOCKING:
2911 * Inherited from caller.
2912 *
2913 * RETURNS:
2914 * 1 if SATA spd configuration is needed, 0 otherwise.
2915 */
936fd732 2916int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2917{
2918 u32 scontrol;
2919
936fd732 2920 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2921 return 1;
1c3fae4d 2922
936fd732 2923 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2924}
2925
2926/**
3c567b7d 2927 * sata_set_spd - set SATA spd according to spd limit
936fd732 2928 * @link: Link to set SATA spd for
1c3fae4d 2929 *
936fd732 2930 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2931 *
2932 * LOCKING:
2933 * Inherited from caller.
2934 *
2935 * RETURNS:
2936 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2937 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2938 */
936fd732 2939int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2940{
2941 u32 scontrol;
81952c54 2942 int rc;
1c3fae4d 2943
936fd732 2944 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2945 return rc;
1c3fae4d 2946
936fd732 2947 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2948 return 0;
2949
936fd732 2950 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2951 return rc;
2952
1c3fae4d
TH
2953 return 1;
2954}
2955
452503f9
AC
2956/*
2957 * This mode timing computation functionality is ported over from
2958 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2959 */
2960/*
b352e57d 2961 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2962 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2963 * for UDMA6, which is currently supported only by Maxtor drives.
2964 *
2965 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2966 */
2967
/* Columns presumably follow struct ata_timing field order as used by
 * ata_timing_quantize(): mode, setup, act8b, rec8b, cyc8b, active,
 * recover, cycle, udma -- values in nanoseconds.  TODO confirm against
 * the struct ata_timing declaration.
 */
static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },

	/* terminator: 0xFF is greater than any real xfer mode id, so
	 * ata_timing_find_mode()'s scan always stops here */
	{ 0xFF }
};
2999
2dcb407e
JG
/* ENOUGH: how many whole 'unit' periods are needed to cover 'v'
 * (ceiling division).  EZ: same, but maps v == 0 to 0 ("no constraint").
 * NOTE(review): 'v' is evaluated twice in EZ -- keep arguments free of
 * side effects.
 */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
452503f9
AC
3002
/* Quantize timing 't' into clock-period counts in 'q'.  T is the base
 * clock period and UT the UDMA clock period; the *1000 scaling suggests
 * ns -> ps conversion, but confirm units against the callers.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	/* udma uses its own clock */
	q->udma = EZ(t->udma * 1000, UT);
}
3014
/*
 * ata_timing_merge - merge two timings, taking the slower of each field
 * @a: first timing
 * @b: second timing
 * @m: result (may alias @a or @b)
 * @what: ATA_TIMING_* bitmask selecting which fields to merge
 *
 * For every field selected in @what, store max(a, b) into @m; fields
 * not selected are left untouched in @m.  Used to combine drive-reported
 * and table timings so the result satisfies both constraints.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
3027
6357357c 3028const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 3029{
70cd071e
TH
3030 const struct ata_timing *t = ata_timing;
3031
3032 while (xfer_mode > t->mode)
3033 t++;
452503f9 3034
70cd071e
TH
3035 if (xfer_mode == t->mode)
3036 return t;
3037 return NULL;
452503f9
AC
3038}
3039
/*
 * ata_timing_compute - compute quantized timings for a device/mode pair
 * @adev: target device (its IDENTIFY data supplies EIDE overrides)
 * @speed: XFER_* mode to compute timings for
 * @t: output timing, in bus clock counts after quantization
 * @T: PIO/DMA bus clock period (picoseconds)
 * @UT: UDMA bus clock period (picoseconds)
 *
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	/* start from the table entry for @speed */
	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* PIO0-2 use the non-IORDY word; faster PIO uses IORDY */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* honour the slower of drive-reported vs table cycle times */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse for the device's PIO mode and merge it in */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		/* split the slack evenly, giving the remainder to recovery */
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3110
a0f79b92
TH
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the lowest mode (e.g. XFER_PIO_0) of this transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk the timing table upward while entries still belong to
	 * @xfer_shift, remembering the last mode whose cycle time fits */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		/* table is sorted fastest-last; stop at the first too-fast mode */
		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3161
cf176e1a
TH
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* the QUIET flag rides along in @sel; peel it off first */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode (UDMA before MWDMA);
		 * refuse to leave the corresponding mask empty */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* limit UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode, and must actually change something */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3250
/*
 * ata_dev_set_mode - issue SET FEATURES - XFER MODE and revalidate
 * @dev: device whose xfer mode is being programmed
 *
 * Issues the SET XFERMODE command for @dev's already-chosen xfer_mode,
 * revalidates the device, and applies several quirk rules that allow a
 * device-reported error to be ignored when the device is known to be
 * configured correctly anyway.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	/* track whether the device ends up in a PIO mode */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* non-device errors (HSM, timeout, ...) are always fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		ign_dev_err = 1;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		ign_dev_err = 1;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3317
1da177e4 3318/**
04351821 3319 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3320 * @link: link on which timings will be programmed
1967b7ff 3321 * @r_failed_dev: out parameter for failed device
1da177e4 3322 *
04351821
AC
3323 * Standard implementation of the function used to tune and set
3324 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3325 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3326 * returned in @r_failed_dev.
780a87f7 3327 *
1da177e4 3328 * LOCKING:
0cba632b 3329 * PCI/etc. bus probe sem.
e82cbdb9
TH
3330 *
3331 * RETURNS:
3332 * 0 on success, negative errno otherwise
1da177e4 3333 */
04351821 3334
0260731f 3335int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3336{
0260731f 3337 struct ata_port *ap = link->ap;
e8e0619f 3338 struct ata_device *dev;
f58229f8 3339 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3340
a6d5a51c 3341 /* step 1: calculate xfer_mask */
f58229f8 3342 ata_link_for_each_dev(dev, link) {
7dc951ae 3343 unsigned long pio_mask, dma_mask;
b3a70601 3344 unsigned int mode_mask;
a6d5a51c 3345
e1211e3f 3346 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3347 continue;
3348
b3a70601
AC
3349 mode_mask = ATA_DMA_MASK_ATA;
3350 if (dev->class == ATA_DEV_ATAPI)
3351 mode_mask = ATA_DMA_MASK_ATAPI;
3352 else if (ata_id_is_cfa(dev->id))
3353 mode_mask = ATA_DMA_MASK_CFA;
3354
3373efd8 3355 ata_dev_xfermask(dev);
33267325 3356 ata_force_xfermask(dev);
1da177e4 3357
acf356b1
TH
3358 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3359 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3360
3361 if (libata_dma_mask & mode_mask)
3362 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3363 else
3364 dma_mask = 0;
3365
acf356b1
TH
3366 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3367 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3368
4f65977d 3369 found = 1;
70cd071e 3370 if (dev->dma_mode != 0xff)
5444a6f4 3371 used_dma = 1;
a6d5a51c 3372 }
4f65977d 3373 if (!found)
e82cbdb9 3374 goto out;
a6d5a51c
TH
3375
3376 /* step 2: always set host PIO timings */
f58229f8 3377 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3378 if (!ata_dev_enabled(dev))
3379 continue;
3380
70cd071e 3381 if (dev->pio_mode == 0xff) {
f15a1daf 3382 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3383 rc = -EINVAL;
e82cbdb9 3384 goto out;
e8e0619f
TH
3385 }
3386
3387 dev->xfer_mode = dev->pio_mode;
3388 dev->xfer_shift = ATA_SHIFT_PIO;
3389 if (ap->ops->set_piomode)
3390 ap->ops->set_piomode(ap, dev);
3391 }
1da177e4 3392
a6d5a51c 3393 /* step 3: set host DMA timings */
f58229f8 3394 ata_link_for_each_dev(dev, link) {
70cd071e 3395 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3396 continue;
3397
3398 dev->xfer_mode = dev->dma_mode;
3399 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3400 if (ap->ops->set_dmamode)
3401 ap->ops->set_dmamode(ap, dev);
3402 }
1da177e4
LT
3403
3404 /* step 4: update devices' xfer mode */
f58229f8 3405 ata_link_for_each_dev(dev, link) {
18d90deb 3406 /* don't update suspended devices' xfer mode */
9666f400 3407 if (!ata_dev_enabled(dev))
83206a29
TH
3408 continue;
3409
3373efd8 3410 rc = ata_dev_set_mode(dev);
5bbc53f4 3411 if (rc)
e82cbdb9 3412 goto out;
83206a29 3413 }
1da177e4 3414
e8e0619f
TH
3415 /* Record simplex status. If we selected DMA then the other
3416 * host channels are not permitted to do so.
5444a6f4 3417 */
cca3974e 3418 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3419 ap->host->simplex_claimed = ap;
5444a6f4 3420
e82cbdb9
TH
3421 out:
3422 if (rc)
3423 *r_failed_dev = dev;
3424 return rc;
1da177e4
LT
3425}
3426
1fdffbce
JG
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the registers, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3446
1da177e4
LT
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies before warning)
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* first phase: poll up to @tmout_pat, then warn if still busy */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* second phase: keep polling until the overall @tmout expires */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	/* 0xff status means nothing is responding on the bus */
	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3501
88ff6eaf
TH
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	/* never wait for 0xff longer than the caller's deadline */
	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 *
	 * Note that some PATA controllers (pata_ali) explode if
	 * status register is read more than once when there's no
	 * device attached.
	 */
	if (ap->flags & ATA_FLAG_SATA) {
		while (1) {
			u8 status = ata_chk_status(ap);

			if (status != 0xff || time_after(jiffies, deadline))
				return;

			msleep(50);
		}
	}
}
3556
d4b2bab4
TH
3557/**
3558 * ata_wait_ready - sleep until BSY clears, or timeout
3559 * @ap: port containing status register to be polled
3560 * @deadline: deadline jiffies for the operation
3561 *
3562 * Sleep until ATA Status register bit BSY clears, or timeout
3563 * occurs.
3564 *
3565 * LOCKING:
3566 * Kernel thread context (may sleep).
3567 *
3568 * RETURNS:
3569 * 0 on success, -errno otherwise.
3570 */
3571int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3572{
3573 unsigned long start = jiffies;
3574 int warned = 0;
3575
3576 while (1) {
3577 u8 status = ata_chk_status(ap);
3578 unsigned long now = jiffies;
3579
3580 if (!(status & ATA_BUSY))
3581 return 0;
936fd732 3582 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3583 return -ENODEV;
3584 if (time_after(now, deadline))
3585 return -EBUSY;
3586
3587 if (!warned && time_after(now, start + 5 * HZ) &&
3588 (deadline - now > 3 * HZ)) {
3589 ata_port_printk(ap, KERN_WARNING,
3590 "port is slow to respond, please be patient "
3591 "(Status 0x%x)\n", status);
3592 warned = 1;
3593 }
3594
3595 msleep(50);
3596 }
3597}
3598
/*
 * ata_bus_post_reset - wait for devices to become ready after bus reset
 * @ap: port that was just reset
 * @devmask: bitmask of devices found by ata_devchk (bit 0 = dev0, bit 1 = dev1)
 * @deadline: deadline jiffies for the whole operation
 *
 * Waits for each present device's BSY bit to clear.  -ENODEV from a
 * single device is remembered but does not abort waiting for the other
 * device; any other error is returned immediately.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;	/* remember, but keep going for dev1 */
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			/* post-reset signature: nsect == lbal == 1 */
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3658
d4b2bab4
TH
/*
 * ata_bus_softreset - perform ATA software reset (SRST) on a port
 * @ap: port to reset
 * @devmask: bitmask of devices expected on the bus
 * @deadline: deadline jiffies for the operation
 *
 * Pulses SRST in the device control register, waits the mandated
 * settle time, checks for a floating (0xFF) bus, then waits for the
 * devices via ata_bus_post_reset().
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset. causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3685
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 is there */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		/* -ENODEV (empty bus) is not fatal here */
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	/* err == 0x81 means device 1 failed diagnostics; skip it */
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3773
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is min(params timeout, caller's deadline) */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET == 1 may be a stuck value; keep polling
			 * until the deadline before accepting it */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3842
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* clear DET (no reset action), keep SPD, set IPM to disable
	 * partial/slumber power management */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3878
f5914a46
TH
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		/* device never became ready -- escalate to hardreset */
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3941
c2bd5804
TH
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing attached, report and bail out */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	/* err == 0x81 means device 1 failed diagnostics; skip it */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
4001
4002/**
cc0680a5
TH
4003 * sata_link_hardreset - reset link via SATA phy reset
4004 * @link: link to reset
b6103f6d 4005 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 4006 * @deadline: deadline jiffies for the operation
c2bd5804 4007 *
cc0680a5 4008 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
4009 *
4010 * LOCKING:
4011 * Kernel thread context (may sleep)
4012 *
4013 * RETURNS:
4014 * 0 on success, -errno otherwise.
4015 */
cc0680a5 4016int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 4017 unsigned long deadline)
c2bd5804 4018{
852ee16a 4019 u32 scontrol;
81952c54 4020 int rc;
852ee16a 4021
c2bd5804
TH
4022 DPRINTK("ENTER\n");
4023
936fd732 4024 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
4025 /* SATA spec says nothing about how to reconfigure
4026 * spd. To be on the safe side, turn off phy during
4027 * reconfiguration. This works for at least ICH7 AHCI
4028 * and Sil3124.
4029 */
936fd732 4030 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 4031 goto out;
81952c54 4032
a34b6fc0 4033 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 4034
936fd732 4035 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 4036 goto out;
1c3fae4d 4037
936fd732 4038 sata_set_spd(link);
1c3fae4d
TH
4039 }
4040
4041 /* issue phy wake/reset */
936fd732 4042 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 4043 goto out;
81952c54 4044
852ee16a 4045 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 4046
936fd732 4047 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 4048 goto out;
c2bd5804 4049
1c3fae4d 4050 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
4051 * 10.4.2 says at least 1 ms.
4052 */
4053 msleep(1);
4054
936fd732
TH
4055 /* bring link back */
4056 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
4057 out:
4058 DPRINTK("EXIT, rc=%d\n", rc);
4059 return rc;
4060}
4061
4062/**
4063 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 4064 * @link: link to reset
b6103f6d 4065 * @class: resulting class of attached device
d4b2bab4 4066 * @deadline: deadline jiffies for the operation
b6103f6d
TH
4067 *
4068 * SATA phy-reset host port using DET bits of SControl register,
4069 * wait for !BSY and classify the attached device.
4070 *
4071 * LOCKING:
4072 * Kernel thread context (may sleep)
4073 *
4074 * RETURNS:
4075 * 0 on success, -errno otherwise.
4076 */
cc0680a5 4077int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 4078 unsigned long deadline)
b6103f6d 4079{
cc0680a5 4080 struct ata_port *ap = link->ap;
936fd732 4081 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
4082 int rc;
4083
4084 DPRINTK("ENTER\n");
4085
4086 /* do hardreset */
cc0680a5 4087 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 4088 if (rc) {
cc0680a5 4089 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
4090 "COMRESET failed (errno=%d)\n", rc);
4091 return rc;
4092 }
c2bd5804 4093
c2bd5804 4094 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 4095 if (ata_link_offline(link)) {
c2bd5804
TH
4096 *class = ATA_DEV_NONE;
4097 DPRINTK("EXIT, link offline\n");
4098 return 0;
4099 }
4100
88ff6eaf
TH
4101 /* wait a while before checking status */
4102 ata_wait_after_reset(ap, deadline);
34fee227 4103
633273a3
TH
4104 /* If PMP is supported, we have to do follow-up SRST. Note
4105 * that some PMPs don't send D2H Reg FIS after hardreset at
4106 * all if the first port is empty. Wait for it just for a
4107 * second and request follow-up SRST.
4108 */
4109 if (ap->flags & ATA_FLAG_PMP) {
4110 ata_wait_ready(ap, jiffies + HZ);
4111 return -EAGAIN;
4112 }
4113
d4b2bab4 4114 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
4115 /* link occupied, -ENODEV too is an error */
4116 if (rc) {
cc0680a5 4117 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
4118 "COMRESET failed (errno=%d)\n", rc);
4119 return rc;
c2bd5804
TH
4120 }
4121
3a39746a
TH
4122 ap->ops->dev_select(ap, 0); /* probably unnecessary */
4123
3f19859e 4124 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
4125
4126 DPRINTK("EXIT, class=%u\n", *class);
4127 return 0;
4128}
4129
4130/**
4131 * ata_std_postreset - standard postreset callback
cc0680a5 4132 * @link: the target ata_link
c2bd5804
TH
4133 * @classes: classes of attached devices
4134 *
4135 * This function is invoked after a successful reset. Note that
4136 * the device might have been reset more than once using
4137 * different reset methods before postreset is invoked.
c2bd5804 4138 *
c2bd5804
TH
4139 * LOCKING:
4140 * Kernel thread context (may sleep)
4141 */
cc0680a5 4142void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 4143{
cc0680a5 4144 struct ata_port *ap = link->ap;
dc2b3515
TH
4145 u32 serror;
4146
c2bd5804
TH
4147 DPRINTK("ENTER\n");
4148
c2bd5804 4149 /* print link status */
936fd732 4150 sata_print_link_status(link);
c2bd5804 4151
dc2b3515 4152 /* clear SError */
936fd732
TH
4153 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
4154 sata_scr_write(link, SCR_ERROR, serror);
f7fe7ad4 4155 link->eh_info.serror = 0;
dc2b3515 4156
c2bd5804
TH
4157 /* is double-select really necessary? */
4158 if (classes[0] != ATA_DEV_NONE)
4159 ap->ops->dev_select(ap, 1);
4160 if (classes[1] != ATA_DEV_NONE)
4161 ap->ops->dev_select(ap, 0);
4162
3a39746a
TH
4163 /* bail out if no device is present */
4164 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
4165 DPRINTK("EXIT, no device\n");
4166 return;
4167 }
4168
4169 /* set up device control */
0d5ff566
TH
4170 if (ap->ioaddr.ctl_addr)
4171 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
4172
4173 DPRINTK("EXIT\n");
4174}
4175
623a3128
TH
4176/**
4177 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
4178 * @dev: device to compare against
4179 * @new_class: class of the new device
4180 * @new_id: IDENTIFY page of the new device
4181 *
4182 * Compare @new_class and @new_id against @dev and determine
4183 * whether @dev is the device indicated by @new_class and
4184 * @new_id.
4185 *
4186 * LOCKING:
4187 * None.
4188 *
4189 * RETURNS:
4190 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4191 */
3373efd8
TH
4192static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4193 const u16 *new_id)
623a3128
TH
4194{
4195 const u16 *old_id = dev->id;
a0cf733b
TH
4196 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4197 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4198
4199 if (dev->class != new_class) {
f15a1daf
TH
4200 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4201 dev->class, new_class);
623a3128
TH
4202 return 0;
4203 }
4204
a0cf733b
TH
4205 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4206 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4207 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4208 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4209
4210 if (strcmp(model[0], model[1])) {
f15a1daf
TH
4211 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4212 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
4213 return 0;
4214 }
4215
4216 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
4217 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4218 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
4219 return 0;
4220 }
4221
623a3128
TH
4222 return 1;
4223}
4224
4225/**
fe30911b 4226 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4227 * @dev: target ATA device
bff04647 4228 * @readid_flags: read ID flags
623a3128
TH
4229 *
4230 * Re-read IDENTIFY page and make sure @dev is still attached to
4231 * the port.
4232 *
4233 * LOCKING:
4234 * Kernel thread context (may sleep)
4235 *
4236 * RETURNS:
4237 * 0 on success, negative errno otherwise
4238 */
fe30911b 4239int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4240{
5eb45c02 4241 unsigned int class = dev->class;
9af5c9c9 4242 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4243 int rc;
4244
fe635c7e 4245 /* read ID data */
bff04647 4246 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4247 if (rc)
fe30911b 4248 return rc;
623a3128
TH
4249
4250 /* is the device still there? */
fe30911b
TH
4251 if (!ata_dev_same_device(dev, class, id))
4252 return -ENODEV;
623a3128 4253
fe635c7e 4254 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4255 return 0;
4256}
4257
4258/**
4259 * ata_dev_revalidate - Revalidate ATA device
4260 * @dev: device to revalidate
422c9daa 4261 * @new_class: new class code
fe30911b
TH
4262 * @readid_flags: read ID flags
4263 *
4264 * Re-read IDENTIFY page, make sure @dev is still attached to the
4265 * port and reconfigure it according to the new IDENTIFY page.
4266 *
4267 * LOCKING:
4268 * Kernel thread context (may sleep)
4269 *
4270 * RETURNS:
4271 * 0 on success, negative errno otherwise
4272 */
422c9daa
TH
4273int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4274 unsigned int readid_flags)
fe30911b 4275{
6ddcd3b0 4276 u64 n_sectors = dev->n_sectors;
fe30911b
TH
4277 int rc;
4278
4279 if (!ata_dev_enabled(dev))
4280 return -ENODEV;
4281
422c9daa
TH
4282 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4283 if (ata_class_enabled(new_class) &&
4284 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4285 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4286 dev->class, new_class);
4287 rc = -ENODEV;
4288 goto fail;
4289 }
4290
fe30911b
TH
4291 /* re-read ID */
4292 rc = ata_dev_reread_id(dev, readid_flags);
4293 if (rc)
4294 goto fail;
623a3128
TH
4295
4296 /* configure device according to the new ID */
efdaedc4 4297 rc = ata_dev_configure(dev);
6ddcd3b0
TH
4298 if (rc)
4299 goto fail;
4300
4301 /* verify n_sectors hasn't changed */
b54eebd6
TH
4302 if (dev->class == ATA_DEV_ATA && n_sectors &&
4303 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
4304 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4305 "%llu != %llu\n",
4306 (unsigned long long)n_sectors,
4307 (unsigned long long)dev->n_sectors);
8270bec4
TH
4308
4309 /* restore original n_sectors */
4310 dev->n_sectors = n_sectors;
4311
6ddcd3b0
TH
4312 rc = -ENODEV;
4313 goto fail;
4314 }
4315
4316 return 0;
623a3128
TH
4317
4318 fail:
f15a1daf 4319 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4320 return rc;
4321}
4322
6919a0a6
AC
4323struct ata_blacklist_entry {
4324 const char *model_num;
4325 const char *model_rev;
4326 unsigned long horkage;
4327};
4328
4329static const struct ata_blacklist_entry ata_device_blacklist [] = {
4330 /* Devices with DMA related problems under Linux */
4331 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4332 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4333 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4334 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4335 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4336 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4337 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4338 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4339 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4340 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4341 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4342 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4343 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4344 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4345 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4346 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4347 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4348 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4349 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4350 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4351 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4352 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4353 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4354 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4355 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4356 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4357 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4358 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4359 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4360 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
4361 /* Odd clown on sil3726/4726 PMPs */
4362 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4363 ATA_HORKAGE_SKIP_PM },
6919a0a6 4364
18d6e9d5 4365 /* Weird ATAPI devices */
40a1d531 4366 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 4367
6919a0a6
AC
4368 /* Devices we expect to fail diagnostics */
4369
4370 /* Devices where NCQ should be avoided */
4371 /* NCQ is slow */
2dcb407e 4372 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 4373 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
4374 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4375 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4376 /* NCQ is broken */
539cc7c7 4377 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4378 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 4379 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 4380 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 4381
36e337d0
RH
4382 /* Blacklist entries taken from Silicon Image 3124/3132
4383 Windows driver .inf file - also several Linux problem reports */
4384 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4385 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4386 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 4387
16c55b03
TH
4388 /* devices which puke on READ_NATIVE_MAX */
4389 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4390 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4391 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4392 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4393
93328e11
AC
4394 /* Devices which report 1 sector over size HPA */
4395 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4396 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 4397 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 4398
6bbfd53d
AC
4399 /* Devices which get the IVB wrong */
4400 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4401 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
e9f33406
PM
4402 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4403 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4404 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
6bbfd53d 4405
6919a0a6
AC
4406 /* End Marker */
4407 { }
1da177e4 4408};
2e9edbf8 4409
/**
 *	strn_pattern_cmp - match a name against a pattern with trailing wildcard
 *	@patt: pattern string, optionally ending in @wildchar
 *	@name: string to match against @patt
 *	@wildchar: wildcard character (the blacklist uses '*')
 *
 *	A pattern ending in @wildchar matches any @name that shares the
 *	prefix before the wildcard.  Any other pattern must match @name
 *	exactly.
 *
 *	RETURNS:
 *	0 if @name matches @patt, non-zero otherwise.
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/* trailing wildcard (patt = "prefix*"): compare the prefix only */
	p = strchr(patt, wildchar);
	if (p && p[1] == '\0')
		return strncmp(patt, name, p - patt);

	/* No wildcard: require an exact match.  The previous code
	 * compared only strlen(name) characters, so a pattern could
	 * falsely match a name that is a mere prefix of it (e.g.
	 * patt "CRD-8400B" matched name "CRD-84").  strcmp() also
	 * preserves the old empty-name semantics: "" matches only "".
	 */
	return strcmp(patt, name);
}
4432
75683fe7 4433static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4434{
8bfa79fc
TH
4435 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4436 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4437 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4438
8bfa79fc
TH
4439 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4440 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4441
6919a0a6 4442 while (ad->model_num) {
539cc7c7 4443 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4444 if (ad->model_rev == NULL)
4445 return ad->horkage;
539cc7c7 4446 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4447 return ad->horkage;
f4b15fef 4448 }
6919a0a6 4449 ad++;
f4b15fef 4450 }
1da177e4
LT
4451 return 0;
4452}
4453
6919a0a6
AC
4454static int ata_dma_blacklisted(const struct ata_device *dev)
4455{
4456 /* We don't support polling DMA.
4457 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4458 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4459 */
9af5c9c9 4460 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4461 (dev->flags & ATA_DFLAG_CDB_INTR))
4462 return 1;
75683fe7 4463 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4464}
4465
6bbfd53d
AC
4466/**
4467 * ata_is_40wire - check drive side detection
4468 * @dev: device
4469 *
4470 * Perform drive side detection decoding, allowing for device vendors
4471 * who can't follow the documentation.
4472 */
4473
4474static int ata_is_40wire(struct ata_device *dev)
4475{
4476 if (dev->horkage & ATA_HORKAGE_IVB)
4477 return ata_drive_40wire_relaxed(dev->id);
4478 return ata_drive_40wire(dev->id);
4479}
4480
a6d5a51c
TH
4481/**
4482 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4483 * @dev: Device to compute xfermask for
4484 *
acf356b1
TH
4485 * Compute supported xfermask of @dev and store it in
4486 * dev->*_mask. This function is responsible for applying all
4487 * known limits including host controller limits, device
4488 * blacklist, etc...
a6d5a51c
TH
4489 *
4490 * LOCKING:
4491 * None.
a6d5a51c 4492 */
3373efd8 4493static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4494{
9af5c9c9
TH
4495 struct ata_link *link = dev->link;
4496 struct ata_port *ap = link->ap;
cca3974e 4497 struct ata_host *host = ap->host;
a6d5a51c 4498 unsigned long xfer_mask;
1da177e4 4499
37deecb5 4500 /* controller modes available */
565083e1
TH
4501 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4502 ap->mwdma_mask, ap->udma_mask);
4503
8343f889 4504 /* drive modes available */
37deecb5
TH
4505 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4506 dev->mwdma_mask, dev->udma_mask);
4507 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4508
b352e57d
AC
4509 /*
4510 * CFA Advanced TrueIDE timings are not allowed on a shared
4511 * cable
4512 */
4513 if (ata_dev_pair(dev)) {
4514 /* No PIO5 or PIO6 */
4515 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4516 /* No MWDMA3 or MWDMA 4 */
4517 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4518 }
4519
37deecb5
TH
4520 if (ata_dma_blacklisted(dev)) {
4521 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4522 ata_dev_printk(dev, KERN_WARNING,
4523 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4524 }
a6d5a51c 4525
14d66ab7 4526 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4527 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4528 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4529 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4530 "other device, disabling DMA\n");
5444a6f4 4531 }
565083e1 4532
e424675f
JG
4533 if (ap->flags & ATA_FLAG_NO_IORDY)
4534 xfer_mask &= ata_pio_mask_no_iordy(dev);
4535
5444a6f4 4536 if (ap->ops->mode_filter)
a76b62ca 4537 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4538
8343f889
RH
4539 /* Apply cable rule here. Don't apply it early because when
4540 * we handle hot plug the cable type can itself change.
4541 * Check this last so that we know if the transfer rate was
4542 * solely limited by the cable.
4543 * Unknown or 80 wire cables reported host side are checked
4544 * drive side as well. Cases where we know a 40wire cable
4545 * is used safely for 80 are not checked here.
4546 */
4547 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4548 /* UDMA/44 or higher would be available */
2dcb407e 4549 if ((ap->cbl == ATA_CBL_PATA40) ||
6bbfd53d 4550 (ata_is_40wire(dev) &&
2dcb407e
JG
4551 (ap->cbl == ATA_CBL_PATA_UNK ||
4552 ap->cbl == ATA_CBL_PATA80))) {
4553 ata_dev_printk(dev, KERN_WARNING,
8343f889
RH
4554 "limited to UDMA/33 due to 40-wire cable\n");
4555 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4556 }
4557
565083e1
TH
4558 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4559 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4560}
4561
1da177e4
LT
4562/**
4563 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4564 * @dev: Device to which command will be sent
4565 *
780a87f7
JG
4566 * Issue SET FEATURES - XFER MODE command to device @dev
4567 * on port @ap.
4568 *
1da177e4 4569 * LOCKING:
0cba632b 4570 * PCI/etc. bus probe sem.
83206a29
TH
4571 *
4572 * RETURNS:
4573 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4574 */
4575
3373efd8 4576static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4577{
a0123703 4578 struct ata_taskfile tf;
83206a29 4579 unsigned int err_mask;
1da177e4
LT
4580
4581 /* set up set-features taskfile */
4582 DPRINTK("set features - xfer mode\n");
4583
464cf177
TH
4584 /* Some controllers and ATAPI devices show flaky interrupt
4585 * behavior after setting xfer mode. Use polling instead.
4586 */
3373efd8 4587 ata_tf_init(dev, &tf);
a0123703
TH
4588 tf.command = ATA_CMD_SET_FEATURES;
4589 tf.feature = SETFEATURES_XFER;
464cf177 4590 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4591 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4592 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4593 if (ata_pio_need_iordy(dev))
4594 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4595 /* If the device has IORDY and the controller does not - turn it off */
4596 else if (ata_id_has_iordy(dev->id))
11b7becc 4597 tf.nsect = 0x01;
b9f8ab2d
AC
4598 else /* In the ancient relic department - skip all of this */
4599 return 0;
1da177e4 4600
2b789108 4601 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4602
4603 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4604 return err_mask;
4605}
9f45cbd3 4606/**
218f3d30 4607 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4608 * @dev: Device to which command will be sent
4609 * @enable: Whether to enable or disable the feature
218f3d30 4610 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4611 *
4612 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4613 * on port @ap with sector count
9f45cbd3
KCA
4614 *
4615 * LOCKING:
4616 * PCI/etc. bus probe sem.
4617 *
4618 * RETURNS:
4619 * 0 on success, AC_ERR_* mask otherwise.
4620 */
218f3d30
JG
4621static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4622 u8 feature)
9f45cbd3
KCA
4623{
4624 struct ata_taskfile tf;
4625 unsigned int err_mask;
4626
4627 /* set up set-features taskfile */
4628 DPRINTK("set features - SATA features\n");
4629
4630 ata_tf_init(dev, &tf);
4631 tf.command = ATA_CMD_SET_FEATURES;
4632 tf.feature = enable;
4633 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4634 tf.protocol = ATA_PROT_NODATA;
218f3d30 4635 tf.nsect = feature;
9f45cbd3 4636
2b789108 4637 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4638
83206a29
TH
4639 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4640 return err_mask;
1da177e4
LT
4641}
4642
8bf62ece
AL
4643/**
4644 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4645 * @dev: Device to which command will be sent
e2a7f77a
RD
4646 * @heads: Number of heads (taskfile parameter)
4647 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4648 *
4649 * LOCKING:
6aff8f1f
TH
4650 * Kernel thread context (may sleep)
4651 *
4652 * RETURNS:
4653 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4654 */
3373efd8
TH
4655static unsigned int ata_dev_init_params(struct ata_device *dev,
4656 u16 heads, u16 sectors)
8bf62ece 4657{
a0123703 4658 struct ata_taskfile tf;
6aff8f1f 4659 unsigned int err_mask;
8bf62ece
AL
4660
4661 /* Number of sectors per track 1-255. Number of heads 1-16 */
4662 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4663 return AC_ERR_INVALID;
8bf62ece
AL
4664
4665 /* set up init dev params taskfile */
4666 DPRINTK("init dev params \n");
4667
3373efd8 4668 ata_tf_init(dev, &tf);
a0123703
TH
4669 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4670 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4671 tf.protocol = ATA_PROT_NODATA;
4672 tf.nsect = sectors;
4673 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4674
2b789108 4675 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4676 /* A clean abort indicates an original or just out of spec drive
4677 and we should continue as we issue the setup based on the
4678 drive reported working geometry */
4679 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4680 err_mask = 0;
8bf62ece 4681
6aff8f1f
TH
4682 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4683 return err_mask;
8bf62ece
AL
4684}
4685
1da177e4 4686/**
0cba632b
JG
4687 * ata_sg_clean - Unmap DMA memory associated with command
4688 * @qc: Command containing DMA memory to be released
4689 *
4690 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4691 *
4692 * LOCKING:
cca3974e 4693 * spin_lock_irqsave(host lock)
1da177e4 4694 */
70e6ad0c 4695void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4696{
4697 struct ata_port *ap = qc->ap;
ff2aeb1e 4698 struct scatterlist *sg = qc->sg;
1da177e4
LT
4699 int dir = qc->dma_dir;
4700
a4631474 4701 WARN_ON(sg == NULL);
1da177e4 4702
dde20207 4703 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4704
dde20207
JB
4705 if (qc->n_elem)
4706 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
1da177e4
LT
4707
4708 qc->flags &= ~ATA_QCFLAG_DMAMAP;
ff2aeb1e 4709 qc->sg = NULL;
1da177e4
LT
4710}
4711
4712/**
4713 * ata_fill_sg - Fill PCI IDE PRD table
4714 * @qc: Metadata associated with taskfile to be transferred
4715 *
780a87f7
JG
4716 * Fill PCI IDE PRD (scatter-gather) table with segments
4717 * associated with the current disk command.
4718 *
1da177e4 4719 * LOCKING:
cca3974e 4720 * spin_lock_irqsave(host lock)
1da177e4
LT
4721 *
4722 */
4723static void ata_fill_sg(struct ata_queued_cmd *qc)
4724{
1da177e4 4725 struct ata_port *ap = qc->ap;
cedc9a47 4726 struct scatterlist *sg;
ff2aeb1e 4727 unsigned int si, pi;
1da177e4 4728
ff2aeb1e
TH
4729 pi = 0;
4730 for_each_sg(qc->sg, sg, qc->n_elem, si) {
1da177e4
LT
4731 u32 addr, offset;
4732 u32 sg_len, len;
4733
4734 /* determine if physical DMA addr spans 64K boundary.
4735 * Note h/w doesn't support 64-bit, so we unconditionally
4736 * truncate dma_addr_t to u32.
4737 */
4738 addr = (u32) sg_dma_address(sg);
4739 sg_len = sg_dma_len(sg);
4740
4741 while (sg_len) {
4742 offset = addr & 0xffff;
4743 len = sg_len;
4744 if ((offset + sg_len) > 0x10000)
4745 len = 0x10000 - offset;
4746
ff2aeb1e
TH
4747 ap->prd[pi].addr = cpu_to_le32(addr);
4748 ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
4749 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
1da177e4 4750
ff2aeb1e 4751 pi++;
1da177e4
LT
4752 sg_len -= len;
4753 addr += len;
4754 }
4755 }
4756
ff2aeb1e 4757 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
1da177e4 4758}
b9a4197e 4759
d26fc955
AC
4760/**
4761 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4762 * @qc: Metadata associated with taskfile to be transferred
4763 *
4764 * Fill PCI IDE PRD (scatter-gather) table with segments
4765 * associated with the current disk command. Perform the fill
4766 * so that we avoid writing any length 64K records for
4767 * controllers that don't follow the spec.
4768 *
4769 * LOCKING:
4770 * spin_lock_irqsave(host lock)
4771 *
4772 */
4773static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4774{
4775 struct ata_port *ap = qc->ap;
4776 struct scatterlist *sg;
ff2aeb1e 4777 unsigned int si, pi;
d26fc955 4778
ff2aeb1e
TH
4779 pi = 0;
4780 for_each_sg(qc->sg, sg, qc->n_elem, si) {
d26fc955
AC
4781 u32 addr, offset;
4782 u32 sg_len, len, blen;
4783
2dcb407e 4784 /* determine if physical DMA addr spans 64K boundary.
d26fc955
AC
4785 * Note h/w doesn't support 64-bit, so we unconditionally
4786 * truncate dma_addr_t to u32.
4787 */
4788 addr = (u32) sg_dma_address(sg);
4789 sg_len = sg_dma_len(sg);
4790
4791 while (sg_len) {
4792 offset = addr & 0xffff;
4793 len = sg_len;
4794 if ((offset + sg_len) > 0x10000)
4795 len = 0x10000 - offset;
4796
4797 blen = len & 0xffff;
ff2aeb1e 4798 ap->prd[pi].addr = cpu_to_le32(addr);
d26fc955
AC
4799 if (blen == 0) {
4800 /* Some PATA chipsets like the CS5530 can't
4801 cope with 0x0000 meaning 64K as the spec says */
ff2aeb1e 4802 ap->prd[pi].flags_len = cpu_to_le32(0x8000);
d26fc955 4803 blen = 0x8000;
ff2aeb1e 4804 ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
d26fc955 4805 }
ff2aeb1e
TH
4806 ap->prd[pi].flags_len = cpu_to_le32(blen);
4807 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);
d26fc955 4808
ff2aeb1e 4809 pi++;
d26fc955
AC
4810 sg_len -= len;
4811 addr += len;
4812 }
4813 }
4814
ff2aeb1e 4815 ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
d26fc955
AC
4816}
4817
1da177e4
LT
4818/**
4819 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4820 * @qc: Metadata associated with taskfile to check
4821 *
780a87f7
JG
4822 * Allow low-level driver to filter ATA PACKET commands, returning
4823 * a status indicating whether or not it is OK to use DMA for the
4824 * supplied PACKET command.
4825 *
1da177e4 4826 * LOCKING:
cca3974e 4827 * spin_lock_irqsave(host lock)
0cba632b 4828 *
1da177e4
LT
4829 * RETURNS: 0 when ATAPI DMA can be used
4830 * nonzero otherwise
4831 */
4832int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4833{
4834 struct ata_port *ap = qc->ap;
b9a4197e
TH
4835
4836 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4837 * few ATAPI devices choke on such DMA requests.
4838 */
4839 if (unlikely(qc->nbytes & 15))
4840 return 1;
6f23a31d 4841
1da177e4 4842 if (ap->ops->check_atapi_dma)
b9a4197e 4843 return ap->ops->check_atapi_dma(qc);
1da177e4 4844
b9a4197e 4845 return 0;
1da177e4 4846}
b9a4197e 4847
31cc23b3
TH
4848/**
4849 * ata_std_qc_defer - Check whether a qc needs to be deferred
4850 * @qc: ATA command in question
4851 *
4852 * Non-NCQ commands cannot run with any other command, NCQ or
4853 * not. As upper layer only knows the queue depth, we are
4854 * responsible for maintaining exclusion. This function checks
4855 * whether a new command @qc can be issued.
4856 *
4857 * LOCKING:
4858 * spin_lock_irqsave(host lock)
4859 *
4860 * RETURNS:
4861 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4862 */
4863int ata_std_qc_defer(struct ata_queued_cmd *qc)
4864{
4865 struct ata_link *link = qc->dev->link;
4866
4867 if (qc->tf.protocol == ATA_PROT_NCQ) {
4868 if (!ata_tag_valid(link->active_tag))
4869 return 0;
4870 } else {
4871 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4872 return 0;
4873 }
4874
4875 return ATA_DEFER_LINK;
4876}
4877
1da177e4
LT
4878/**
4879 * ata_qc_prep - Prepare taskfile for submission
4880 * @qc: Metadata associated with taskfile to be prepared
4881 *
780a87f7
JG
4882 * Prepare ATA taskfile for submission.
4883 *
1da177e4 4884 * LOCKING:
cca3974e 4885 * spin_lock_irqsave(host lock)
1da177e4
LT
4886 */
4887void ata_qc_prep(struct ata_queued_cmd *qc)
4888{
4889 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4890 return;
4891
4892 ata_fill_sg(qc);
4893}
4894
d26fc955
AC
4895/**
4896 * ata_dumb_qc_prep - Prepare taskfile for submission
4897 * @qc: Metadata associated with taskfile to be prepared
4898 *
4899 * Prepare ATA taskfile for submission.
4900 *
4901 * LOCKING:
4902 * spin_lock_irqsave(host lock)
4903 */
4904void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4905{
4906 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4907 return;
4908
4909 ata_fill_sg_dumb(qc);
4910}
4911
e46834cd
BK
/* no-op ->qc_prep for controllers that need no S/G table setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
4913
0cba632b
JG
4914/**
4915 * ata_sg_init - Associate command with scatter-gather table.
4916 * @qc: Command to be associated
4917 * @sg: Scatter-gather table.
4918 * @n_elem: Number of elements in s/g table.
4919 *
4920 * Initialize the data-related elements of queued_cmd @qc
4921 * to point to a scatter-gather table @sg, containing @n_elem
4922 * elements.
4923 *
4924 * LOCKING:
cca3974e 4925 * spin_lock_irqsave(host lock)
0cba632b 4926 */
1da177e4
LT
4927void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4928 unsigned int n_elem)
4929{
ff2aeb1e 4930 qc->sg = sg;
1da177e4 4931 qc->n_elem = n_elem;
ff2aeb1e 4932 qc->cursg = qc->sg;
1da177e4
LT
4933}
4934
ff2aeb1e
TH
4935/**
4936 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4937 * @qc: Command with scatter-gather table to be mapped.
4938 *
4939 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4940 *
4941 * LOCKING:
4942 * spin_lock_irqsave(host lock)
4943 *
4944 * RETURNS:
4945 * Zero on success, negative on error.
4946 *
4947 */
4948static int ata_sg_setup(struct ata_queued_cmd *qc)
4949{
4950 struct ata_port *ap = qc->ap;
dde20207 4951 unsigned int n_elem;
ff2aeb1e
TH
4952
4953 VPRINTK("ENTER, ata%u\n", ap->print_id);
4954
dde20207
JB
4955 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4956 if (n_elem < 1)
4957 return -1;
ff2aeb1e 4958
dde20207 4959 DPRINTK("%d sg elements mapped\n", n_elem);
1da177e4 4960
dde20207 4961 qc->n_elem = n_elem;
f92a2636 4962 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4
LT
4963
4964 return 0;
4965}
4966
0baab86b 4967/**
c893a3ae 4968 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4969 * @buf: Buffer to swap
4970 * @buf_words: Number of 16-bit words in buffer.
4971 *
4972 * Swap halves of 16-bit words if needed to convert from
4973 * little-endian byte order to native cpu byte order, or
4974 * vice-versa.
4975 *
4976 * LOCKING:
6f0ef4fa 4977 * Inherited from caller.
0baab86b 4978 */
1da177e4
LT
4979void swap_buf_le16(u16 *buf, unsigned int buf_words)
4980{
4981#ifdef __BIG_ENDIAN
4982 unsigned int i;
4983
4984 for (i = 0; i < buf_words; i++)
4985 buf[i] = le16_to_cpu(buf[i]);
4986#endif /* __BIG_ENDIAN */
4987}
4988
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write (READ or WRITE)
 *
 *	Transfer data from/to the device data register by PIO,
 *	16 bits at a time through the port's data register.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed (rounded up to a whole 16-bit word).
 */
unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
			   unsigned int buflen, int rw)
{
	struct ata_port *ap = dev->link->ap;
	void __iomem *data_addr = ap->ioaddr.data_addr;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (rw == READ)
		ioread16_rep(data_addr, buf, words);
	else
		iowrite16_rep(data_addr, buf, words);

	/* Transfer trailing 1 byte, if any.  The data register is
	 * word-wide, so the odd byte must go through a bounce word.
	 */
	if (unlikely(buflen & 0x01)) {
		__le16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (rw == READ) {
			/* read a full word, keep only its low byte */
			align_buf[0] = cpu_to_le16(ioread16(data_addr));
			memcpy(trailing_buf, align_buf, 1);
		} else {
			/* pad the odd byte out to a word (high byte zero) */
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), data_addr);
		}
		words++;
	}

	return words << 1;
}
5034
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	unsigned long irq_flags;
	unsigned int done;

	/* keep local interrupts off for the duration of the transfer */
	local_irq_save(irq_flags);
	done = ata_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(irq_flags);

	return done;
}
5063
5064
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device,
 *	advancing the qc's scatterlist cursor as it goes.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* if this is the final sector, move the HSM to its last state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		/* highmem pages must be temporarily mapped; irqs are
		 * disabled around the atomic kmap (KM_IRQ0 slot)
		 */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no mapping needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	/* account for the transferred sector */
	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted -> advance to the next one */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5120
07f6f7d0 5121/**
5a5dbd18 5122 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5123 * @qc: Command on going
5124 *
5a5dbd18 5125 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5126 * ATA device for the DRQ request.
5127 *
5128 * LOCKING:
5129 * Inherited from caller.
5130 */
1da177e4 5131
07f6f7d0
AL
5132static void ata_pio_sectors(struct ata_queued_cmd *qc)
5133{
5134 if (is_multi_taskfile(&qc->tf)) {
5135 /* READ/WRITE MULTIPLE */
5136 unsigned int nsect;
5137
587005de 5138 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5139
5a5dbd18 5140 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5141 qc->dev->multi_count);
07f6f7d0
AL
5142 while (nsect--)
5143 ata_pio_sector(qc);
5144 } else
5145 ata_pio_sector(qc);
4cc980b3
AL
5146
5147 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5148}
5149
c71c1857
AL
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB, then set
 *	the next HSM state according to the data protocol.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	/* ATAPI CDBs are at least 12 bytes */
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* next HSM state depends on how the data phase is carried out */
	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		/* no data phase; wait for final status */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5185
6ae4cfb5
AL
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes the device asked to transfer
 *
 *	Transfer data from/to the ATAPI device, walking the qc's
 *	scatterlist page by page.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -1 if the device requested more data than the
 *	scatterlist can hold (overrun).
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		/* scatterlist exhausted but device still wants data */
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* highmem: atomic kmap with irqs off (KM_IRQ0 slot) */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
	}

	/* data_xfer may round up; never let bytes underflow */
	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* advance to the next sg entry when this one is used up */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/* consumed can be larger than count only for the last transfer */
	WARN_ON(qc->cursg && count != consumed);

	if (bytes)
		goto next_sg;
	return 0;
}
5265
6ae4cfb5
AL
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the byte count and interrupt reason from the device,
 *	validate them, and transfer the requested data from/to the
 *	ATAPI device.  On protocol violation, flags AC_ERR_HSM and
 *	moves the HSM to HSM_ST_ERR.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count is split across the LBA mid/high registers */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	/* a zero byte count with DRQ set is a protocol violation */
	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_altstatus(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5323
5324/**
c234fb00
AL
5325 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5326 * @ap: the target ata_port
5327 * @qc: qc on going
1da177e4 5328 *
c234fb00
AL
5329 * RETURNS:
5330 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5331 */
c234fb00
AL
5332
5333static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5334{
c234fb00
AL
5335 if (qc->tf.flags & ATA_TFLAG_POLLING)
5336 return 1;
1da177e4 5337
c234fb00
AL
5338 if (ap->hsm_task_state == HSM_ST_FIRST) {
5339 if (qc->tf.protocol == ATA_PROT_PIO &&
5340 (qc->tf.flags & ATA_TFLAG_WRITE))
5341 return 1;
1da177e4 5342
405e66b3 5343 if (ata_is_atapi(qc->tf.protocol) &&
c234fb00
AL
5344 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5345 return 1;
fe79e683
AL
5346 }
5347
c234fb00
AL
5348 return 0;
5349}
1da177e4 5350
c17ea20d
TH
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  An HSM
 *	violation (AC_ERR_HSM) freezes the port instead of
 *	completing the command when new-style EH is available.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH available */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable port irq before completing */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation: let EH sort it out */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: always complete directly */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5400
bb5cb290
AL
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Advance the PIO/ATAPI host state machine one or more steps
 *	based on the device @status.  May loop internally (via
 *	fsm_start) when one state transition immediately enables
 *	the next.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrputed */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5642
/* ata_pio_task - workqueue worker that drives the PIO state machine.
 * Polls BSY with escalating waits, then hands the status to
 * ata_hsm_move(); loops while the HSM asks for further polling.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: retry later from the workqueue */
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5680
1da177e4
LT
5681/**
5682 * ata_qc_new - Request an available ATA command, for queueing
5683 * @ap: Port associated with device @dev
5684 * @dev: Device from whom we request an available command structure
5685 *
5686 * LOCKING:
0cba632b 5687 * None.
1da177e4
LT
5688 */
5689
5690static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5691{
5692 struct ata_queued_cmd *qc = NULL;
5693 unsigned int i;
5694
e3180499 5695 /* no command while frozen */
b51e9e5d 5696 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5697 return NULL;
5698
2ab7db1f
TH
5699 /* the last tag is reserved for internal command. */
5700 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5701 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5702 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5703 break;
5704 }
5705
5706 if (qc)
5707 qc->tag = i;
5708
5709 return qc;
5710}
5711
5712/**
5713 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5714 * @dev: Device from whom we request an available command structure
5715 *
5716 * LOCKING:
0cba632b 5717 * None.
1da177e4
LT
5718 */
5719
3373efd8 5720struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5721{
9af5c9c9 5722 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5723 struct ata_queued_cmd *qc;
5724
5725 qc = ata_qc_new(ap);
5726 if (qc) {
1da177e4
LT
5727 qc->scsicmd = NULL;
5728 qc->ap = ap;
5729 qc->dev = dev;
1da177e4 5730
2c13b7ce 5731 ata_qc_reinit(qc);
1da177e4
LT
5732 }
5733
5734 return qc;
5735}
5736
1da177e4
LT
5737/**
5738 * ata_qc_free - free unused ata_queued_cmd
5739 * @qc: Command to complete
5740 *
5741 * Designed to free unused ata_queued_cmd object
5742 * in case something prevents using it.
5743 *
5744 * LOCKING:
cca3974e 5745 * spin_lock_irqsave(host lock)
1da177e4
LT
5746 */
5747void ata_qc_free(struct ata_queued_cmd *qc)
5748{
4ba946e9
TH
5749 struct ata_port *ap = qc->ap;
5750 unsigned int tag;
5751
a4631474 5752 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5753
4ba946e9
TH
5754 qc->flags = 0;
5755 tag = qc->tag;
5756 if (likely(ata_tag_valid(tag))) {
4ba946e9 5757 qc->tag = ATA_TAG_POISON;
6cec4a39 5758 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5759 }
1da177e4
LT
5760}
5761
/* __ata_qc_complete - low-level qc completion: unmap DMA, clear the
 * qc's active-tag/sactive bookkeeping on its link and port, then
 * invoke the qc's completion callback.  Caller holds host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ: clear this tag from sactive; the link stops being
		 * active when no NCQ tags remain
		 */
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5798
39599a53
TH
5799static void fill_result_tf(struct ata_queued_cmd *qc)
5800{
5801 struct ata_port *ap = qc->ap;
5802
39599a53 5803 qc->result_tf.flags = qc->tf.flags;
4742d54f 5804 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5805}
5806
00115e0f
TH
5807static void ata_verify_xfer(struct ata_queued_cmd *qc)
5808{
5809 struct ata_device *dev = qc->dev;
5810
5811 if (ata_tag_internal(qc->tag))
5812 return;
5813
5814 if (ata_is_nodata(qc->tf.protocol))
5815 return;
5816
5817 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5818 return;
5819
5820 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5821}
5822
f686bcb8
TH
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are handled by their issuer;
			 * everything else is handed to EH
			 */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		/* a successful transfer may clear the dubious-xfer flag */
		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5909
dedaf2b0
TH
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This functions is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits that changed between the old and new active masks are
	 * the tags that finished (or, illegally, newly appeared)
	 */
	done_mask = ap->qc_active ^ qc_active;

	/* a bit set in @qc_active but clear in ap->qc_active would mean
	 * a command became active without us issuing it - reject
	 */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			/* give the LLDD a chance at the qc before completion */
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
5958
1da177e4
LT
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	On any failure (S/G setup or LLDD ->qc_issue), the qc is
 *	completed with an error mask instead of being left in flight.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		WARN_ON(link->sactive & (1 << qc->tag));

		/* first NCQ command on this link makes the link active */
		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		/* non-NCQ must not be mixed with outstanding NCQ commands */
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sg_err;

	/* if device is sleeping, schedule softreset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* completes the qc with the accumulated error mask */
	ata_qc_complete(qc);
}
6029
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATAPI_PROT_PIO:
		case ATAPI_PROT_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATAPI_PROT_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		/* polling mode: drive the HSM from the PIO task */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_pio_queue_task(ap, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_pio_queue_task(ap, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_pio_queue_task(ap, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_pio_queue_task(ap, qc, 0);
		break;

	case ATAPI_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_pio_queue_task(ap, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
6161
1da177e4
LT
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATAPI_PROT_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus - reading it does not clear INTRQ */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA transfer failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATAPI_PROT_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
6263
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* only dispatch for an active, non-polling qc;
			 * polling commands are driven by the PIO task
			 */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
6308
34bf2170
TH
6309/**
6310 * sata_scr_valid - test whether SCRs are accessible
936fd732 6311 * @link: ATA link to test SCR accessibility for
34bf2170 6312 *
936fd732 6313 * Test whether SCRs are accessible for @link.
34bf2170
TH
6314 *
6315 * LOCKING:
6316 * None.
6317 *
6318 * RETURNS:
6319 * 1 if SCRs are accessible, 0 otherwise.
6320 */
936fd732 6321int sata_scr_valid(struct ata_link *link)
34bf2170 6322{
936fd732
TH
6323 struct ata_port *ap = link->ap;
6324
a16abc0b 6325 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6326}
6327
/**
 *	sata_scr_read - read SCR register of the specified port
 *	@link: ATA link to read SCR for
 *	@reg: SCR to read
 *	@val: Place to store read value
 *
 *	Read SCR register @reg of @link into *@val.  This function is
 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_link *link, int reg, u32 *val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;

		if (sata_scr_valid(link))
			return ap->ops->scr_read(ap, reg, val);
		return -EOPNOTSUPP;
	}

	/* PMP links are accessed through the port multiplier */
	return sata_pmp_scr_read(link, reg, val);
}
6356
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_read.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;

		if (sata_scr_valid(link))
			return ap->ops->scr_write(ap, reg, val);
		return -EOPNOTSUPP;
	}

	/* PMP links are accessed through the port multiplier */
	return sata_pmp_scr_write(link, reg, val);
}
6385
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;
		int rc;

		if (sata_scr_valid(link)) {
			rc = ap->ops->scr_write(ap, reg, val);
			/* flush the write by reading the register back */
			if (rc == 0)
				rc = ap->ops->scr_read(ap, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	/* NOTE: no read-back flush on PMP links */
	return sata_pmp_scr_write(link, reg, val);
}
6418
6419/**
936fd732
TH
6420 * ata_link_online - test whether the given link is online
6421 * @link: ATA link to test
34bf2170 6422 *
936fd732
TH
6423 * Test whether @link is online. Note that this function returns
6424 * 0 if online status of @link cannot be obtained, so
6425 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6426 *
6427 * LOCKING:
6428 * None.
6429 *
6430 * RETURNS:
6431 * 1 if the port online status is available and online.
6432 */
936fd732 6433int ata_link_online(struct ata_link *link)
34bf2170
TH
6434{
6435 u32 sstatus;
6436
936fd732
TH
6437 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6438 (sstatus & 0xf) == 0x3)
34bf2170
TH
6439 return 1;
6440 return 0;
6441}
6442
6443/**
936fd732
TH
6444 * ata_link_offline - test whether the given link is offline
6445 * @link: ATA link to test
34bf2170 6446 *
936fd732
TH
6447 * Test whether @link is offline. Note that this function
6448 * returns 0 if offline status of @link cannot be obtained, so
6449 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6450 *
6451 * LOCKING:
6452 * None.
6453 *
6454 * RETURNS:
6455 * 1 if the port offline status is available and offline.
6456 */
936fd732 6457int ata_link_offline(struct ata_link *link)
34bf2170
TH
6458{
6459 u32 sstatus;
6460
936fd732
TH
6461 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6462 (sstatus & 0xf) != 0x3)
34bf2170
TH
6463 return 1;
6464 return 0;
6465}
0baab86b 6466
/**
 *	ata_flush_cache - flush a device's write cache
 *	@dev: target device
 *
 *	Issue FLUSH CACHE (EXT, if the device supports 48-bit LBA
 *	flush) to @dev when the device advertises a flushable cache.
 *
 *	RETURNS:
 *	0 on success or when no flush is needed, -EIO on failure.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	/* nothing to do if the device has no flushable write cache */
	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
6492
6ffa01d8 6493#ifdef CONFIG_PM
cca3974e
JG
/**
 *	ata_host_request_pm - request EH to perform a PM operation on all ports
 *	@host: host to operate on
 *	@mesg: PM message to hand to EH
 *	@action: EH action to set on each link
 *	@ehi_flags: EH info flags to set on each link
 *	@wait: if non-zero, wait for each port's EH to finish and
 *	       propagate the first non-zero result
 *
 *	Helper for host suspend/resume: schedules the PM operation on
 *	every port's EH and optionally waits for completion.
 *
 *	RETURNS:
 *	0 on success, first port's -errno on failure (when @wait).
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6543
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/*
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
	ata_lpm_enable(host);

	/* wait == 1: block until every port's EH has suspended */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
6574
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait == 0: resume runs asynchronously on all ports */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;

	/* reenable link pm */
	ata_lpm_disable(host);
}
6ffa01d8 6595#endif
500530f6 6596
c893a3ae
RD
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM if the PRD table cannot be allocated.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;

	/* managed allocation - freed automatically on device teardown */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	return 0;
}
6620
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe everything past the persistent fields and reset the
	 * transfer-mode masks to "anything goes" for probing
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6655
4fb37a25
TH
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}
6688
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol;
	u8 spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	/* SControl bits 7:4 hold the configured SPD limit; 0 = no limit */
	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	/* apply any libata.force= kernel parameter override */
	ata_force_spd_limit(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
6722
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port stays DISABLED/INITIALIZING until registration completes */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* set up the host link (pmp number 0) */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6782
f0d36efd
TH
/**
 *	ata_host_release - devres release callback for an ATA host
 *	@gendev: generic device the host is attached to
 *	@res: devres resource (unused)
 *
 *	Frees every allocated port (and its SCSI host / PMP links)
 *	and clears the device's driver data.  Invoked automatically
 *	by devres when the owning device goes away.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* ports array may be sparse if allocation failed midway */
		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6804
f3187195
TH
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 leaves a NULL sentinel slot after the last port
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6869
f5cda257
TH
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* advance through @ppi; a NULL entry makes the previous
		 * port_info apply to all remaining ports
		 */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops become the host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6919
32ebbc0c
TH
/**
 *	ata_host_stop - devres callback invoking port_stop/host_stop
 *	@gendev: generic device the host is attached to
 *	@res: devres resource (unused)
 *
 *	Calls each port's ->port_stop and then the host's ->host_stop,
 *	mirroring the starts done in ata_host_start().
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
6937
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* idempotent - nothing to do if already started */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* register a devres stop callback only if someone needs stopping */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_printk(KERN_ERR, host->dev,
						   "failed to start port %d "
						   "(errno=%d)\n", i, rc);
				goto err_out;
			}
		}
		/* ports stay frozen until EH thaws them */
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop every port started so far, in reverse order */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
7013
b03732f0 7014/**
cca3974e
JG
7015 * ata_sas_host_init - Initialize a host struct
7016 * @host: host to initialize
7017 * @dev: device host is attached to
7018 * @flags: host flags
7019 * @ops: port_ops
b03732f0
BK
7020 *
7021 * LOCKING:
7022 * PCI/etc. bus probe sem.
7023 *
7024 */
f3187195 7025/* KILLME - the only user left is ipr */
cca3974e
JG
7026void ata_host_init(struct ata_host *host, struct device *dev,
7027 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 7028{
cca3974e
JG
7029 spin_lock_init(&host->lock);
7030 host->dev = dev;
7031 host->flags = flags;
7032 host->ops = ops;
b03732f0
BK
7033}
7034
f3187195
TH
7035/**
7036 * ata_host_register - register initialized ATA host
7037 * @host: ATA host to register
7038 * @sht: template for SCSI host
7039 *
7040 * Register initialized ATA host. @host is allocated using
7041 * ata_host_alloc() and fully initialized by LLD. This function
7042 * starts ports, registers @host with ATA and SCSI layers and
7043 * probe registered devices.
7044 *
7045 * LOCKING:
7046 * Inherited from calling layer (may sleep).
7047 *
7048 * RETURNS:
7049 * 0 on success, -errno otherwise.
7050 */
7051int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7052{
7053 int i, rc;
7054
7055 /* host must have been started */
7056 if (!(host->flags & ATA_HOST_STARTED)) {
7057 dev_printk(KERN_ERR, host->dev,
7058 "BUG: trying to register unstarted host\n");
7059 WARN_ON(1);
7060 return -EINVAL;
7061 }
7062
7063 /* Blow away unused ports. This happens when LLD can't
7064 * determine the exact number of ports to allocate at
7065 * allocation time.
7066 */
7067 for (i = host->n_ports; host->ports[i]; i++)
7068 kfree(host->ports[i]);
7069
7070 /* give ports names and add SCSI hosts */
7071 for (i = 0; i < host->n_ports; i++)
7072 host->ports[i]->print_id = ata_print_id++;
7073
7074 rc = ata_scsi_add_hosts(host, sht);
7075 if (rc)
7076 return rc;
7077
fafbae87
TH
7078 /* associate with ACPI nodes */
7079 ata_acpi_associate(host);
7080
f3187195
TH
7081 /* set cable, sata_spd_limit and report */
7082 for (i = 0; i < host->n_ports; i++) {
7083 struct ata_port *ap = host->ports[i];
f3187195
TH
7084 unsigned long xfer_mask;
7085
7086 /* set SATA cable type if still unset */
7087 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7088 ap->cbl = ATA_CBL_SATA;
7089
7090 /* init sata_spd_limit to the current value */
4fb37a25 7091 sata_link_init_spd(&ap->link);
f3187195 7092
cbcdd875 7093 /* print per-port info to dmesg */
f3187195
TH
7094 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7095 ap->udma_mask);
7096
abf6e8ed 7097 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
7098 ata_port_printk(ap, KERN_INFO,
7099 "%cATA max %s %s\n",
a16abc0b 7100 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 7101 ata_mode_string(xfer_mask),
cbcdd875 7102 ap->link.eh_info.desc);
abf6e8ed
TH
7103 ata_ehi_clear_desc(&ap->link.eh_info);
7104 } else
f3187195
TH
7105 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7106 }
7107
7108 /* perform each probe synchronously */
7109 DPRINTK("probe begin\n");
7110 for (i = 0; i < host->n_ports; i++) {
7111 struct ata_port *ap = host->ports[i];
f3187195
TH
7112
7113 /* probe */
7114 if (ap->ops->error_handler) {
9af5c9c9 7115 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
7116 unsigned long flags;
7117
7118 ata_port_probe(ap);
7119
7120 /* kick EH for boot probing */
7121 spin_lock_irqsave(ap->lock, flags);
7122
f58229f8
TH
7123 ehi->probe_mask =
7124 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
7125 ehi->action |= ATA_EH_SOFTRESET;
7126 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7127
f4d6d004 7128 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
7129 ap->pflags |= ATA_PFLAG_LOADING;
7130 ata_port_schedule_eh(ap);
7131
7132 spin_unlock_irqrestore(ap->lock, flags);
7133
7134 /* wait for EH to finish */
7135 ata_port_wait_eh(ap);
7136 } else {
7137 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7138 rc = ata_bus_probe(ap);
7139 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7140
7141 if (rc) {
7142 /* FIXME: do something useful here?
7143 * Current libata behavior will
7144 * tear down everything when
7145 * the module is removed
7146 * or the h/w is unplugged.
7147 */
7148 }
7149 }
7150 }
7151
7152 /* probes are done, now scan each port's disk(s) */
7153 DPRINTK("host probe begin\n");
7154 for (i = 0; i < host->n_ports; i++) {
7155 struct ata_port *ap = host->ports[i];
7156
1ae46317 7157 ata_scsi_scan_host(ap, 1);
ca77329f 7158 ata_lpm_schedule(ap, ap->pm_policy);
f3187195
TH
7159 }
7160
7161 return 0;
7162}
7163
f5cda257
TH
7164/**
7165 * ata_host_activate - start host, request IRQ and register it
7166 * @host: target ATA host
7167 * @irq: IRQ to request
7168 * @irq_handler: irq_handler used when requesting IRQ
7169 * @irq_flags: irq_flags used when requesting IRQ
7170 * @sht: scsi_host_template to use when registering the host
7171 *
7172 * After allocating an ATA host and initializing it, most libata
7173 * LLDs perform three steps to activate the host - start host,
7174 * request IRQ and register it. This helper takes necessasry
7175 * arguments and performs the three steps in one go.
7176 *
3d46b2e2
PM
7177 * An invalid IRQ skips the IRQ registration and expects the host to
7178 * have set polling mode on the port. In this case, @irq_handler
7179 * should be NULL.
7180 *
f5cda257
TH
7181 * LOCKING:
7182 * Inherited from calling layer (may sleep).
7183 *
7184 * RETURNS:
7185 * 0 on success, -errno otherwise.
7186 */
7187int ata_host_activate(struct ata_host *host, int irq,
7188 irq_handler_t irq_handler, unsigned long irq_flags,
7189 struct scsi_host_template *sht)
7190{
cbcdd875 7191 int i, rc;
f5cda257
TH
7192
7193 rc = ata_host_start(host);
7194 if (rc)
7195 return rc;
7196
3d46b2e2
PM
7197 /* Special case for polling mode */
7198 if (!irq) {
7199 WARN_ON(irq_handler);
7200 return ata_host_register(host, sht);
7201 }
7202
f5cda257
TH
7203 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7204 dev_driver_string(host->dev), host);
7205 if (rc)
7206 return rc;
7207
cbcdd875
TH
7208 for (i = 0; i < host->n_ports; i++)
7209 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7210
f5cda257
TH
7211 rc = ata_host_register(host, sht);
7212 /* if failed, just free the IRQ and leave ports alone */
7213 if (rc)
7214 devm_free_irq(host->dev, irq, host);
7215
7216 return rc;
7217}
7218
720ba126
TH
7219/**
7220 * ata_port_detach - Detach ATA port in prepration of device removal
7221 * @ap: ATA port to be detached
7222 *
7223 * Detach all ATA devices and the associated SCSI devices of @ap;
7224 * then, remove the associated SCSI host. @ap is guaranteed to
7225 * be quiescent on return from this function.
7226 *
7227 * LOCKING:
7228 * Kernel thread context (may sleep).
7229 */
741b7763 7230static void ata_port_detach(struct ata_port *ap)
720ba126
TH
7231{
7232 unsigned long flags;
41bda9c9 7233 struct ata_link *link;
f58229f8 7234 struct ata_device *dev;
720ba126
TH
7235
7236 if (!ap->ops->error_handler)
c3cf30a9 7237 goto skip_eh;
720ba126
TH
7238
7239 /* tell EH we're leaving & flush EH */
ba6a1308 7240 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 7241 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 7242 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7243
7244 ata_port_wait_eh(ap);
7245
7f9ad9b8
TH
7246 /* EH is now guaranteed to see UNLOADING - EH context belongs
7247 * to us. Disable all existing devices.
720ba126 7248 */
41bda9c9
TH
7249 ata_port_for_each_link(link, ap) {
7250 ata_link_for_each_dev(dev, link)
7251 ata_dev_disable(dev);
7252 }
720ba126 7253
720ba126
TH
7254 /* Final freeze & EH. All in-flight commands are aborted. EH
7255 * will be skipped and retrials will be terminated with bad
7256 * target.
7257 */
ba6a1308 7258 spin_lock_irqsave(ap->lock, flags);
720ba126 7259 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 7260 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7261
7262 ata_port_wait_eh(ap);
45a66c1c 7263 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 7264
c3cf30a9 7265 skip_eh:
720ba126 7266 /* remove the associated SCSI host */
cca3974e 7267 scsi_remove_host(ap->scsi_host);
720ba126
TH
7268}
7269
0529c159
TH
7270/**
7271 * ata_host_detach - Detach all ports of an ATA host
7272 * @host: Host to detach
7273 *
7274 * Detach all ports of @host.
7275 *
7276 * LOCKING:
7277 * Kernel thread context (may sleep).
7278 */
7279void ata_host_detach(struct ata_host *host)
7280{
7281 int i;
7282
7283 for (i = 0; i < host->n_ports; i++)
7284 ata_port_detach(host->ports[i]);
562f0c2d
TH
7285
7286 /* the host is dead now, dissociate ACPI */
7287 ata_acpi_dissociate(host);
0529c159
TH
7288}
7289
1da177e4
LT
7290/**
7291 * ata_std_ports - initialize ioaddr with standard port offsets.
7292 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7293 *
7294 * Utility function which initializes data_addr, error_addr,
7295 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7296 * device_addr, status_addr, and command_addr to standard offsets
7297 * relative to cmd_addr.
7298 *
7299 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7300 */
0baab86b 7301
1da177e4
LT
7302void ata_std_ports(struct ata_ioports *ioaddr)
7303{
7304 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7305 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7306 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7307 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7308 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7309 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7310 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7311 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7312 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7313 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7314}
7315
0baab86b 7316
374b1873
JG
7317#ifdef CONFIG_PCI
7318
1da177e4
LT
7319/**
7320 * ata_pci_remove_one - PCI layer callback for device removal
7321 * @pdev: PCI device that was removed
7322 *
b878ca5d
TH
7323 * PCI layer indicates to libata via this hook that hot-unplug or
7324 * module unload event has occurred. Detach all ports. Resource
7325 * release is handled via devres.
1da177e4
LT
7326 *
7327 * LOCKING:
7328 * Inherited from PCI layer (may sleep).
7329 */
f0d36efd 7330void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7331{
2855568b 7332 struct device *dev = &pdev->dev;
cca3974e 7333 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7334
b878ca5d 7335 ata_host_detach(host);
1da177e4
LT
7336}
7337
7338/* move to PCI subsystem */
057ace5e 7339int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7340{
7341 unsigned long tmp = 0;
7342
7343 switch (bits->width) {
7344 case 1: {
7345 u8 tmp8 = 0;
7346 pci_read_config_byte(pdev, bits->reg, &tmp8);
7347 tmp = tmp8;
7348 break;
7349 }
7350 case 2: {
7351 u16 tmp16 = 0;
7352 pci_read_config_word(pdev, bits->reg, &tmp16);
7353 tmp = tmp16;
7354 break;
7355 }
7356 case 4: {
7357 u32 tmp32 = 0;
7358 pci_read_config_dword(pdev, bits->reg, &tmp32);
7359 tmp = tmp32;
7360 break;
7361 }
7362
7363 default:
7364 return -EINVAL;
7365 }
7366
7367 tmp &= bits->mask;
7368
7369 return (tmp == bits->val) ? 1 : 0;
7370}
9b847548 7371
6ffa01d8 7372#ifdef CONFIG_PM
3c5100c1 7373void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
7374{
7375 pci_save_state(pdev);
4c90d971 7376 pci_disable_device(pdev);
500530f6 7377
3a2d5b70 7378 if (mesg.event & PM_EVENT_SLEEP)
500530f6 7379 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
7380}
7381
553c4aa6 7382int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7383{
553c4aa6
TH
7384 int rc;
7385
9b847548
JA
7386 pci_set_power_state(pdev, PCI_D0);
7387 pci_restore_state(pdev);
553c4aa6 7388
b878ca5d 7389 rc = pcim_enable_device(pdev);
553c4aa6
TH
7390 if (rc) {
7391 dev_printk(KERN_ERR, &pdev->dev,
7392 "failed to enable device after resume (%d)\n", rc);
7393 return rc;
7394 }
7395
9b847548 7396 pci_set_master(pdev);
553c4aa6 7397 return 0;
500530f6
TH
7398}
7399
3c5100c1 7400int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7401{
cca3974e 7402 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7403 int rc = 0;
7404
cca3974e 7405 rc = ata_host_suspend(host, mesg);
500530f6
TH
7406 if (rc)
7407 return rc;
7408
3c5100c1 7409 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7410
7411 return 0;
7412}
7413
7414int ata_pci_device_resume(struct pci_dev *pdev)
7415{
cca3974e 7416 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7417 int rc;
500530f6 7418
553c4aa6
TH
7419 rc = ata_pci_device_do_resume(pdev);
7420 if (rc == 0)
7421 ata_host_resume(host);
7422 return rc;
9b847548 7423}
6ffa01d8
TH
7424#endif /* CONFIG_PM */
7425
1da177e4
LT
7426#endif /* CONFIG_PCI */
7427
33267325
TH
7428static int __init ata_parse_force_one(char **cur,
7429 struct ata_force_ent *force_ent,
7430 const char **reason)
7431{
7432 /* FIXME: Currently, there's no way to tag init const data and
7433 * using __initdata causes build failure on some versions of
7434 * gcc. Once __initdataconst is implemented, add const to the
7435 * following structure.
7436 */
7437 static struct ata_force_param force_tbl[] __initdata = {
7438 { "40c", .cbl = ATA_CBL_PATA40 },
7439 { "80c", .cbl = ATA_CBL_PATA80 },
7440 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
7441 { "unk", .cbl = ATA_CBL_PATA_UNK },
7442 { "ign", .cbl = ATA_CBL_PATA_IGN },
7443 { "sata", .cbl = ATA_CBL_SATA },
7444 { "1.5Gbps", .spd_limit = 1 },
7445 { "3.0Gbps", .spd_limit = 2 },
7446 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
7447 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
7448 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
7449 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
7450 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
7451 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
7452 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
7453 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
7454 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
7455 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
7456 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
7457 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
7458 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
7459 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
7460 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7461 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7462 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7463 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7464 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7465 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7466 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7467 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7468 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7469 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7470 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7471 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7472 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7473 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7474 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7475 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7476 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7477 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7478 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7479 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7480 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7481 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
7482 };
7483 char *start = *cur, *p = *cur;
7484 char *id, *val, *endp;
7485 const struct ata_force_param *match_fp = NULL;
7486 int nr_matches = 0, i;
7487
7488 /* find where this param ends and update *cur */
7489 while (*p != '\0' && *p != ',')
7490 p++;
7491
7492 if (*p == '\0')
7493 *cur = p;
7494 else
7495 *cur = p + 1;
7496
7497 *p = '\0';
7498
7499 /* parse */
7500 p = strchr(start, ':');
7501 if (!p) {
7502 val = strstrip(start);
7503 goto parse_val;
7504 }
7505 *p = '\0';
7506
7507 id = strstrip(start);
7508 val = strstrip(p + 1);
7509
7510 /* parse id */
7511 p = strchr(id, '.');
7512 if (p) {
7513 *p++ = '\0';
7514 force_ent->device = simple_strtoul(p, &endp, 10);
7515 if (p == endp || *endp != '\0') {
7516 *reason = "invalid device";
7517 return -EINVAL;
7518 }
7519 }
7520
7521 force_ent->port = simple_strtoul(id, &endp, 10);
7522 if (p == endp || *endp != '\0') {
7523 *reason = "invalid port/link";
7524 return -EINVAL;
7525 }
7526
7527 parse_val:
7528 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
7529 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
7530 const struct ata_force_param *fp = &force_tbl[i];
7531
7532 if (strncasecmp(val, fp->name, strlen(val)))
7533 continue;
7534
7535 nr_matches++;
7536 match_fp = fp;
7537
7538 if (strcasecmp(val, fp->name) == 0) {
7539 nr_matches = 1;
7540 break;
7541 }
7542 }
7543
7544 if (!nr_matches) {
7545 *reason = "unknown value";
7546 return -EINVAL;
7547 }
7548 if (nr_matches > 1) {
7549 *reason = "ambigious value";
7550 return -EINVAL;
7551 }
7552
7553 force_ent->param = *match_fp;
7554
7555 return 0;
7556}
7557
7558static void __init ata_parse_force_param(void)
7559{
7560 int idx = 0, size = 1;
7561 int last_port = -1, last_device = -1;
7562 char *p, *cur, *next;
7563
7564 /* calculate maximum number of params and allocate force_tbl */
7565 for (p = ata_force_param_buf; *p; p++)
7566 if (*p == ',')
7567 size++;
7568
7569 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
7570 if (!ata_force_tbl) {
7571 printk(KERN_WARNING "ata: failed to extend force table, "
7572 "libata.force ignored\n");
7573 return;
7574 }
7575
7576 /* parse and populate the table */
7577 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7578 const char *reason = "";
7579 struct ata_force_ent te = { .port = -1, .device = -1 };
7580
7581 next = cur;
7582 if (ata_parse_force_one(&next, &te, &reason)) {
7583 printk(KERN_WARNING "ata: failed to parse force "
7584 "parameter \"%s\" (%s)\n",
7585 cur, reason);
7586 continue;
7587 }
7588
7589 if (te.port == -1) {
7590 te.port = last_port;
7591 te.device = last_device;
7592 }
7593
7594 ata_force_tbl[idx++] = te;
7595
7596 last_port = te.port;
7597 last_device = te.device;
7598 }
7599
7600 ata_force_tbl_size = idx;
7601}
1da177e4 7602
1da177e4
LT
7603static int __init ata_init(void)
7604{
a8601e5f 7605 ata_probe_timeout *= HZ;
33267325
TH
7606
7607 ata_parse_force_param();
7608
1da177e4
LT
7609 ata_wq = create_workqueue("ata");
7610 if (!ata_wq)
7611 return -ENOMEM;
7612
453b07ac
TH
7613 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7614 if (!ata_aux_wq) {
7615 destroy_workqueue(ata_wq);
7616 return -ENOMEM;
7617 }
7618
1da177e4
LT
7619 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7620 return 0;
7621}
7622
7623static void __exit ata_exit(void)
7624{
33267325 7625 kfree(ata_force_tbl);
1da177e4 7626 destroy_workqueue(ata_wq);
453b07ac 7627 destroy_workqueue(ata_aux_wq);
1da177e4
LT
7628}
7629
a4625085 7630subsys_initcall(ata_init);
1da177e4
LT
7631module_exit(ata_exit);
7632
67846b30 7633static unsigned long ratelimit_time;
34af946a 7634static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7635
7636int ata_ratelimit(void)
7637{
7638 int rc;
7639 unsigned long flags;
7640
7641 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7642
7643 if (time_after(jiffies, ratelimit_time)) {
7644 rc = 1;
7645 ratelimit_time = jiffies + (HZ/5);
7646 } else
7647 rc = 0;
7648
7649 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7650
7651 return rc;
7652}
7653
c22daff4
TH
7654/**
7655 * ata_wait_register - wait until register value changes
7656 * @reg: IO-mapped register
7657 * @mask: Mask to apply to read register value
7658 * @val: Wait condition
7659 * @interval_msec: polling interval in milliseconds
7660 * @timeout_msec: timeout in milliseconds
7661 *
7662 * Waiting for some bits of register to change is a common
7663 * operation for ATA controllers. This function reads 32bit LE
7664 * IO-mapped register @reg and tests for the following condition.
7665 *
7666 * (*@reg & mask) != val
7667 *
7668 * If the condition is met, it returns; otherwise, the process is
7669 * repeated after @interval_msec until timeout.
7670 *
7671 * LOCKING:
7672 * Kernel thread context (may sleep)
7673 *
7674 * RETURNS:
7675 * The final register value.
7676 */
7677u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7678 unsigned long interval_msec,
7679 unsigned long timeout_msec)
7680{
7681 unsigned long timeout;
7682 u32 tmp;
7683
7684 tmp = ioread32(reg);
7685
7686 /* Calculate timeout _after_ the first read to make sure
7687 * preceding writes reach the controller before starting to
7688 * eat away the timeout.
7689 */
7690 timeout = jiffies + (timeout_msec * HZ) / 1000;
7691
7692 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7693 msleep(interval_msec);
7694 tmp = ioread32(reg);
7695 }
7696
7697 return tmp;
7698}
7699
dd5b06c4
TH
7700/*
7701 * Dummy port_ops
7702 */
7703static void ata_dummy_noret(struct ata_port *ap) { }
7704static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7705static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7706
7707static u8 ata_dummy_check_status(struct ata_port *ap)
7708{
7709 return ATA_DRDY;
7710}
7711
7712static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7713{
7714 return AC_ERR_SYSTEM;
7715}
7716
7717const struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
7718 .check_status = ata_dummy_check_status,
7719 .check_altstatus = ata_dummy_check_status,
7720 .dev_select = ata_noop_dev_select,
7721 .qc_prep = ata_noop_qc_prep,
7722 .qc_issue = ata_dummy_qc_issue,
7723 .freeze = ata_dummy_noret,
7724 .thaw = ata_dummy_noret,
7725 .error_handler = ata_dummy_noret,
7726 .post_internal_cmd = ata_dummy_qc_noret,
7727 .irq_clear = ata_dummy_noret,
7728 .port_start = ata_dummy_ret0,
7729 .port_stop = ata_dummy_noret,
7730};
7731
21b0ad4f
TH
7732const struct ata_port_info ata_dummy_port_info = {
7733 .port_ops = &ata_dummy_port_ops,
7734};
7735
1da177e4
LT
7736/*
7737 * libata is essentially a library of internal helper functions for
7738 * low-level ATA host controller drivers. As such, the API/ABI is
7739 * likely to change as new drivers are added and updated.
7740 * Do not depend on ABI/API stability.
7741 */
e9c83914
TH
7742EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7743EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7744EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7745EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7746EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7747EXPORT_SYMBOL_GPL(ata_std_bios_param);
7748EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7749EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7750EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7751EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7752EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7753EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7754EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7755EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 7756EXPORT_SYMBOL_GPL(ata_sg_init);
9a1004d0 7757EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7758EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7759EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7760EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7761EXPORT_SYMBOL_GPL(ata_tf_load);
7762EXPORT_SYMBOL_GPL(ata_tf_read);
7763EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7764EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7765EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
7766EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7767EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
7768EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7769EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7770EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7771EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7772EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7773EXPORT_SYMBOL_GPL(ata_mode_string);
7774EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4
LT
7775EXPORT_SYMBOL_GPL(ata_check_status);
7776EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7777EXPORT_SYMBOL_GPL(ata_exec_command);
7778EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7779EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7780EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7781EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7782EXPORT_SYMBOL_GPL(ata_data_xfer);
7783EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7784EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7785EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7786EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7787EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7788EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7789EXPORT_SYMBOL_GPL(ata_bmdma_start);
7790EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7791EXPORT_SYMBOL_GPL(ata_bmdma_status);
7792EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7793EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7794EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7795EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7796EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7797EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7798EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7799EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7800EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7801EXPORT_SYMBOL_GPL(sata_link_debounce);
7802EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4 7803EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7804EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7805EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7806EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7807EXPORT_SYMBOL_GPL(sata_std_hardreset);
7808EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7809EXPORT_SYMBOL_GPL(ata_dev_classify);
7810EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7811EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7812EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7813EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7814EXPORT_SYMBOL_GPL(ata_busy_sleep);
88ff6eaf 7815EXPORT_SYMBOL_GPL(ata_wait_after_reset);
d4b2bab4 7816EXPORT_SYMBOL_GPL(ata_wait_ready);
1da177e4
LT
7817EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7818EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7819EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7820EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7821EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7822EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7823EXPORT_SYMBOL_GPL(sata_scr_valid);
7824EXPORT_SYMBOL_GPL(sata_scr_read);
7825EXPORT_SYMBOL_GPL(sata_scr_write);
7826EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7827EXPORT_SYMBOL_GPL(ata_link_online);
7828EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7829#ifdef CONFIG_PM
cca3974e
JG
7830EXPORT_SYMBOL_GPL(ata_host_suspend);
7831EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7832#endif /* CONFIG_PM */
6a62a04d
TH
7833EXPORT_SYMBOL_GPL(ata_id_string);
7834EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
7835EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7836
1bc4ccff 7837EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 7838EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
7839EXPORT_SYMBOL_GPL(ata_timing_compute);
7840EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 7841EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 7842
1da177e4
LT
7843#ifdef CONFIG_PCI
7844EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7845EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7846EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7847EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
4e6b79fa 7848EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
1da177e4
LT
7849EXPORT_SYMBOL_GPL(ata_pci_init_one);
7850EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7851#ifdef CONFIG_PM
500530f6
TH
7852EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7853EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7854EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7855EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7856#endif /* CONFIG_PM */
67951ade
AC
7857EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7858EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7859#endif /* CONFIG_PCI */
9b847548 7860
31f88384 7861EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
3af9a77a
TH
7862EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7863EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7864EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7865EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7866
b64bbc39
TH
7867EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7868EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7869EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7870EXPORT_SYMBOL_GPL(ata_port_desc);
7871#ifdef CONFIG_PCI
7872EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7873#endif /* CONFIG_PCI */
7b70fc03 7874EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7875EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7876EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7877EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7878EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7879EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7880EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7881EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7882EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7883EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7884EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7885EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7886
7887EXPORT_SYMBOL_GPL(ata_cable_40wire);
7888EXPORT_SYMBOL_GPL(ata_cable_80wire);
7889EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 7890EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 7891EXPORT_SYMBOL_GPL(ata_cable_sata);