]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
i915: fix AR register restore.
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/highmem.h>
50#include <linux/spinlock.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h>
53#include <linux/timer.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
67846b30 58#include <linux/jiffies.h>
378f058c 59#include <linux/scatterlist.h>
2dcb407e 60#include <linux/io.h>
1da177e4 61#include <scsi/scsi.h>
193515d5 62#include <scsi/scsi_cmnd.h>
1da177e4
LT
63#include <scsi/scsi_host.h>
64#include <linux/libata.h>
1da177e4
LT
65#include <asm/semaphore.h>
66#include <asm/byteorder.h>
140b5e59 67#include <linux/cdrom.h>
1da177e4
LT
68
69#include "libata.h"
70
fda0efc5 71
d7bb4cc7 72/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
73const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
74const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
75const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 76
3373efd8
TH
77static unsigned int ata_dev_init_params(struct ata_device *dev,
78 u16 heads, u16 sectors);
79static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
80static unsigned int ata_dev_set_feature(struct ata_device *dev,
81 u8 enable, u8 feature);
3373efd8 82static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 83static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 84
f3187195 85unsigned int ata_print_id = 1;
1da177e4
LT
86static struct workqueue_struct *ata_wq;
87
453b07ac
TH
88struct workqueue_struct *ata_aux_wq;
89
33267325
TH
/* One "libata.force" override: which attribute(s) to force and the
 * value(s) to force them to.  @name is the human-readable token echoed
 * back in the "FORCE:" log messages. */
struct ata_force_param {
	const char	*name;
	unsigned int	cbl;		/* forced cable type (ATA_CBL_*) */
	int		spd_limit;	/* forced SATA PHY speed limit */
	unsigned long	xfer_mask;	/* forced transfer mode mask */
	unsigned int	horkage_on;	/* horkage flags to turn on */
	unsigned int	horkage_off;	/* horkage flags to turn off */
};

/* A parsed entry of the libata.force module parameter: the port and
 * device it applies to (-1 == wildcard) plus the values to force. */
struct ata_force_ent {
	int			port;
	int			device;
	struct ata_force_param	param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

/* raw parameter string; parsed once during module init (__initdata) */
static char ata_force_param_buf[PAGE_SIZE] __initdata;
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0444);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
111
418dc1f5 112int atapi_enabled = 1;
1623c81e
JG
113module_param(atapi_enabled, int, 0444);
114MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
115
95de719a
AL
116int atapi_dmadir = 0;
117module_param(atapi_dmadir, int, 0444);
118MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
119
baf4fdfa
ML
120int atapi_passthru16 = 1;
121module_param(atapi_passthru16, int, 0444);
122MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
123
c3c013a2
JG
124int libata_fua = 0;
125module_param_named(fua, libata_fua, int, 0444);
126MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
127
2dcb407e 128static int ata_ignore_hpa;
1e999736
AC
129module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
130MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
131
b3a70601
AC
132static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
133module_param_named(dma, libata_dma_mask, int, 0444);
134MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
135
a8601e5f
AM
136static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
137module_param(ata_probe_timeout, int, 0444);
138MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
139
6ebe9d86 140int libata_noacpi = 0;
d7d0dad6 141module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 142MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 143
ae8d4ee7
AC
144int libata_allow_tpm = 0;
145module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
146MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
147
1da177e4
LT
148MODULE_AUTHOR("Jeff Garzik");
149MODULE_DESCRIPTION("Library module for ATA devices");
150MODULE_LICENSE("GPL");
151MODULE_VERSION(DRV_VERSION);
152
0baab86b 153
33267325
TH
154/**
155 * ata_force_cbl - force cable type according to libata.force
156 * @link: ATA link of interest
157 *
158 * Force cable type according to libata.force and whine about it.
159 * The last entry which has matching port number is used, so it
160 * can be specified as part of device force parameters. For
161 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
162 * same effect.
163 *
164 * LOCKING:
165 * EH context.
166 */
167void ata_force_cbl(struct ata_port *ap)
168{
169 int i;
170
171 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
172 const struct ata_force_ent *fe = &ata_force_tbl[i];
173
174 if (fe->port != -1 && fe->port != ap->print_id)
175 continue;
176
177 if (fe->param.cbl == ATA_CBL_NONE)
178 continue;
179
180 ap->cbl = fe->param.cbl;
181 ata_port_printk(ap, KERN_NOTICE,
182 "FORCE: cable set to %s\n", fe->param.name);
183 return;
184 }
185}
186
187/**
188 * ata_force_spd_limit - force SATA spd limit according to libata.force
189 * @link: ATA link of interest
190 *
191 * Force SATA spd limit according to libata.force and whine about
192 * it. When only the port part is specified (e.g. 1:), the limit
193 * applies to all links connected to both the host link and all
194 * fan-out ports connected via PMP. If the device part is
195 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
196 * link not the host link. Device number 15 always points to the
197 * host link whether PMP is attached or not.
198 *
199 * LOCKING:
200 * EH context.
201 */
202static void ata_force_spd_limit(struct ata_link *link)
203{
204 int linkno, i;
205
206 if (ata_is_host_link(link))
207 linkno = 15;
208 else
209 linkno = link->pmp;
210
211 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
212 const struct ata_force_ent *fe = &ata_force_tbl[i];
213
214 if (fe->port != -1 && fe->port != link->ap->print_id)
215 continue;
216
217 if (fe->device != -1 && fe->device != linkno)
218 continue;
219
220 if (!fe->param.spd_limit)
221 continue;
222
223 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
224 ata_link_printk(link, KERN_NOTICE,
225 "FORCE: PHY spd limit set to %s\n", fe->param.name);
226 return;
227 }
228}
229
230/**
231 * ata_force_xfermask - force xfermask according to libata.force
232 * @dev: ATA device of interest
233 *
234 * Force xfer_mask according to libata.force and whine about it.
235 * For consistency with link selection, device number 15 selects
236 * the first device connected to the host link.
237 *
238 * LOCKING:
239 * EH context.
240 */
241static void ata_force_xfermask(struct ata_device *dev)
242{
243 int devno = dev->link->pmp + dev->devno;
244 int alt_devno = devno;
245 int i;
246
247 /* allow n.15 for the first device attached to host port */
248 if (ata_is_host_link(dev->link) && devno == 0)
249 alt_devno = 15;
250
251 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
252 const struct ata_force_ent *fe = &ata_force_tbl[i];
253 unsigned long pio_mask, mwdma_mask, udma_mask;
254
255 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
256 continue;
257
258 if (fe->device != -1 && fe->device != devno &&
259 fe->device != alt_devno)
260 continue;
261
262 if (!fe->param.xfer_mask)
263 continue;
264
265 ata_unpack_xfermask(fe->param.xfer_mask,
266 &pio_mask, &mwdma_mask, &udma_mask);
267 if (udma_mask)
268 dev->udma_mask = udma_mask;
269 else if (mwdma_mask) {
270 dev->udma_mask = 0;
271 dev->mwdma_mask = mwdma_mask;
272 } else {
273 dev->udma_mask = 0;
274 dev->mwdma_mask = 0;
275 dev->pio_mask = pio_mask;
276 }
277
278 ata_dev_printk(dev, KERN_NOTICE,
279 "FORCE: xfer_mask set to %s\n", fe->param.name);
280 return;
281 }
282}
283
284/**
285 * ata_force_horkage - force horkage according to libata.force
286 * @dev: ATA device of interest
287 *
288 * Force horkage according to libata.force and whine about it.
289 * For consistency with link selection, device number 15 selects
290 * the first device connected to the host link.
291 *
292 * LOCKING:
293 * EH context.
294 */
295static void ata_force_horkage(struct ata_device *dev)
296{
297 int devno = dev->link->pmp + dev->devno;
298 int alt_devno = devno;
299 int i;
300
301 /* allow n.15 for the first device attached to host port */
302 if (ata_is_host_link(dev->link) && devno == 0)
303 alt_devno = 15;
304
305 for (i = 0; i < ata_force_tbl_size; i++) {
306 const struct ata_force_ent *fe = &ata_force_tbl[i];
307
308 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
309 continue;
310
311 if (fe->device != -1 && fe->device != devno &&
312 fe->device != alt_devno)
313 continue;
314
315 if (!(~dev->horkage & fe->param.horkage_on) &&
316 !(dev->horkage & fe->param.horkage_off))
317 continue;
318
319 dev->horkage |= fe->param.horkage_on;
320 dev->horkage &= ~fe->param.horkage_off;
321
322 ata_dev_printk(dev, KERN_NOTICE,
323 "FORCE: horkage modified (%s)\n", fe->param.name);
324 }
325}
326
1da177e4
LT
/**
 *	ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 *	@tf: Taskfile to convert
 *	@pmp: Port multiplier port
 *	@is_cmd: This FIS is for command
 *	@fis: Buffer into which data will output (at least 20 bytes)
 *
 *	Converts a standard ATA taskfile to a Serial ATA
 *	FIS structure (Register - Host to Device).
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = pmp & 0xf;		/* Port multiplier number */
	if (is_cmd)
		fis[1] |= (1 << 7);	/* bit 7 indicates Command FIS */

	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	/* HOB ("high order byte") fields used by 48-bit commands */
	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	/* trailing bytes are cleared */
	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
370
/**
 *	ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *	@fis: Buffer from which data will be input
 *	@tf: Taskfile to output
 *
 *	Converts a serial ATA FIS structure to a standard ATA taskfile.
 *	The reverse of ata_tf_to_fis(); note that the ctl and
 *	hob_feature fields are not transported by this FIS and are
 *	left untouched.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	/* HOB fields for 48-bit commands */
	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}
399
8cbd6df1
AL
/* Read/write opcode lookup table.  Indexed exactly as computed in
 * ata_rwcmd_protocol():
 *	base (0 = PIO multi, 8 = PIO single, 16 = DMA)
 *	+ fua * 4 + lba48 * 2 + write
 * A zero entry means that combination has no command (e.g. FUA is
 * only defined for LBA48 writes). */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
429
430/**
8cbd6df1 431 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
432 * @tf: command to examine and configure
433 * @dev: device tf belongs to
1da177e4 434 *
2e9edbf8 435 * Examine the device configuration and tf->flags to calculate
8cbd6df1 436 * the proper read/write commands and protocol to use.
1da177e4
LT
437 *
438 * LOCKING:
439 * caller.
440 */
bd056d7e 441static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 442{
9a3dccc4 443 u8 cmd;
1da177e4 444
9a3dccc4 445 int index, fua, lba48, write;
2e9edbf8 446
9a3dccc4 447 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
448 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
449 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 450
8cbd6df1
AL
451 if (dev->flags & ATA_DFLAG_PIO) {
452 tf->protocol = ATA_PROT_PIO;
9a3dccc4 453 index = dev->multi_count ? 0 : 8;
9af5c9c9 454 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
455 /* Unable to use DMA due to host limitation */
456 tf->protocol = ATA_PROT_PIO;
0565c26d 457 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
458 } else {
459 tf->protocol = ATA_PROT_DMA;
9a3dccc4 460 index = 16;
8cbd6df1 461 }
1da177e4 462
9a3dccc4
TH
463 cmd = ata_rw_cmds[index + fua + lba48 + write];
464 if (cmd) {
465 tf->command = cmd;
466 return 0;
467 }
468 return -1;
1da177e4
LT
469}
470
35b649fe
TH
471/**
472 * ata_tf_read_block - Read block address from ATA taskfile
473 * @tf: ATA taskfile of interest
474 * @dev: ATA device @tf belongs to
475 *
476 * LOCKING:
477 * None.
478 *
479 * Read block address from @tf. This function can handle all
480 * three address formats - LBA, LBA48 and CHS. tf->protocol and
481 * flags select the address format to use.
482 *
483 * RETURNS:
484 * Block address read from @tf.
485 */
486u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
487{
488 u64 block = 0;
489
490 if (tf->flags & ATA_TFLAG_LBA) {
491 if (tf->flags & ATA_TFLAG_LBA48) {
492 block |= (u64)tf->hob_lbah << 40;
493 block |= (u64)tf->hob_lbam << 32;
494 block |= tf->hob_lbal << 24;
495 } else
496 block |= (tf->device & 0xf) << 24;
497
498 block |= tf->lbah << 16;
499 block |= tf->lbam << 8;
500 block |= tf->lbal;
501 } else {
502 u32 cyl, head, sect;
503
504 cyl = tf->lbam | (tf->lbah << 8);
505 head = tf->device & 0xf;
506 sect = tf->lbal;
507
508 block = (cyl * dev->heads + head) * dev->sectors + sect;
509 }
510
511 return block;
512}
513
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  The addressing
 *	mode is chosen in decreasing order of preference: NCQ, then
 *	LBA28/LBA48, then CHS.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	/* internal commands never use NCQ even when it is enabled */
	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ repurposes fields: tag goes in nsect bits 7:3,
		 * sector count goes in the feature registers */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;		/* LBA addressing */
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;	/* FUA */
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28; address bits 27:24 live in the
			 * low nibble of the device register */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
636
cb95d562
TH
637/**
638 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
639 * @pio_mask: pio_mask
640 * @mwdma_mask: mwdma_mask
641 * @udma_mask: udma_mask
642 *
643 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
644 * unsigned int xfer_mask.
645 *
646 * LOCKING:
647 * None.
648 *
649 * RETURNS:
650 * Packed xfer_mask.
651 */
7dc951ae
TH
652unsigned long ata_pack_xfermask(unsigned long pio_mask,
653 unsigned long mwdma_mask,
654 unsigned long udma_mask)
cb95d562
TH
655{
656 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
657 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
658 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
659}
660
c0489e4e
TH
661/**
662 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
663 * @xfer_mask: xfer_mask to unpack
664 * @pio_mask: resulting pio_mask
665 * @mwdma_mask: resulting mwdma_mask
666 * @udma_mask: resulting udma_mask
667 *
668 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
669 * Any NULL distination masks will be ignored.
670 */
7dc951ae
TH
671void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
672 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
673{
674 if (pio_mask)
675 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
676 if (mwdma_mask)
677 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
678 if (udma_mask)
679 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
680}
681
/* Describes one class of transfer modes: the bit range it occupies in
 * an xfer_mask (@shift, @bits) and the XFER_* value of the class's
 * slowest mode (@base).  Terminated by a sentinel with shift == -1. */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },	/* sentinel */
};
691
692/**
693 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
694 * @xfer_mask: xfer_mask of interest
695 *
696 * Return matching XFER_* value for @xfer_mask. Only the highest
697 * bit of @xfer_mask is considered.
698 *
699 * LOCKING:
700 * None.
701 *
702 * RETURNS:
70cd071e 703 * Matching XFER_* value, 0xff if no match found.
cb95d562 704 */
7dc951ae 705u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
706{
707 int highbit = fls(xfer_mask) - 1;
708 const struct ata_xfer_ent *ent;
709
710 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
711 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
712 return ent->base + highbit - ent->shift;
70cd071e 713 return 0xff;
cb95d562
TH
714}
715
716/**
717 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
718 * @xfer_mode: XFER_* of interest
719 *
720 * Return matching xfer_mask for @xfer_mode.
721 *
722 * LOCKING:
723 * None.
724 *
725 * RETURNS:
726 * Matching xfer_mask, 0 if no match found.
727 */
7dc951ae 728unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
729{
730 const struct ata_xfer_ent *ent;
731
732 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
733 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
734 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
735 & ~((1 << ent->shift) - 1);
cb95d562
TH
736 return 0;
737}
738
739/**
740 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
741 * @xfer_mode: XFER_* of interest
742 *
743 * Return matching xfer_shift for @xfer_mode.
744 *
745 * LOCKING:
746 * None.
747 *
748 * RETURNS:
749 * Matching xfer_shift, -1 if no match found.
750 */
7dc951ae 751int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
752{
753 const struct ata_xfer_ent *ent;
754
755 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
756 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
757 return ent->shift;
758 return -1;
759}
760
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
806
4c360c81
TH
/* Map an SStatus SPD value (1-based) to a human-readable link speed. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd >= 1 && spd <= ARRAY_SIZE(spd_str))
		return spd_str[spd - 1];
	return "<unknown>";
}
818
/* Disable @dev: notify ACPI, drop the transfer mode to PIO0, and mark
 * the device's class as unsupported.  No-op if already disabled. */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_acpi_on_disable(dev);
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		/* incrementing class turns ATA_DEV_* into the matching
		 * *_UNSUP value: present but disabled */
		dev->class++;
	}
}
830
ca77329f
KCA
/**
 *	ata_dev_set_dipm - configure Device Initiated Power Management
 *	@dev: device to configure
 *	@policy: link power management policy to apply
 *
 *	Adjust the SControl IPM restriction bits (bits 9:8) according
 *	to @policy and, for MIN_POWER, enable DIPM on devices that
 *	advertise it.
 *
 *	RETURNS:
 *	0 on success, -EINVAL if the port does not support IPM,
 *	otherwise the error from SCR access.
 */
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because Disks are too stupid to know that
	 * If the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL, and instead it
	 * just would give up.  So, for medium_power to
	 * work at all, we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM on devices which advertise it */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL, disallow SLUMBER */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}
914
915/**
916 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
917 * @dev: device to enable power management
918 * @policy: the link power management policy
ca77329f
KCA
919 *
920 * Enable SATA Interface power management. This will enable
921 * Device Interface Power Management (DIPM) for min_power
922 * policy, and then call driver specific callbacks for
923 * enabling Host Initiated Power management.
924 *
925 * Locking: Caller.
926 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
927 */
928void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
929{
930 int rc = 0;
931 struct ata_port *ap = dev->link->ap;
932
933 /* set HIPM first, then DIPM */
934 if (ap->ops->enable_pm)
935 rc = ap->ops->enable_pm(ap, policy);
936 if (rc)
937 goto enable_pm_out;
938 rc = ata_dev_set_dipm(dev, policy);
939
940enable_pm_out:
941 if (rc)
942 ap->pm_policy = MAX_PERFORMANCE;
943 else
944 ap->pm_policy = policy;
945 return /* rc */; /* hopefully we can use 'rc' eventually */
946}
947
1992a5ed 948#ifdef CONFIG_PM
ca77329f
KCA
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* MAX_PERFORMANCE forbids all IPM transitions (device side
	 * first, then the host side callback) */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
1992a5ed 969#endif /* CONFIG_PM */
ca77329f
KCA
970
/* Record @policy on @ap and kick error handling; the actual policy
 * change is carried out from EH context via the ATA_EHI_LPM action. */
void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EHI_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}
978
1992a5ed 979#ifdef CONFIG_PM
ca77329f
KCA
/* NOTE(review): despite the name, this walks every device on @host and
 * calls ata_dev_disable_pm() on it -- presumably quiescing link PM
 * around a host-wide suspend/resume transition; confirm against the
 * CONFIG_PM callers. */
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}
995
/* Re-schedule each port's current pm_policy through EH, restoring the
 * policy that was in effect before ata_lpm_enable(). */
static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
1992a5ed 1005#endif /* CONFIG_PM */
ca77329f
KCA
1006
1007
1da177e4 1008/**
0d5ff566 1009 * ata_devchk - PATA device presence detection
1da177e4
LT
1010 * @ap: ATA channel to examine
1011 * @device: Device to examine (starting at zero)
1012 *
1013 * This technique was originally described in
1014 * Hale Landis's ATADRVR (www.ata-atapi.com), and
1015 * later found its way into the ATA/ATAPI spec.
1016 *
1017 * Write a pattern to the ATA shadow registers,
1018 * and if a device is present, it will respond by
1019 * correctly storing and echoing back the
1020 * ATA shadow register contents.
1021 *
1022 * LOCKING:
1023 * caller.
1024 */
1025
0d5ff566 1026static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
1027{
1028 struct ata_ioports *ioaddr = &ap->ioaddr;
1029 u8 nsect, lbal;
1030
1031 ap->ops->dev_select(ap, device);
1032
0d5ff566
TH
1033 iowrite8(0x55, ioaddr->nsect_addr);
1034 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1035
0d5ff566
TH
1036 iowrite8(0xaa, ioaddr->nsect_addr);
1037 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 1038
0d5ff566
TH
1039 iowrite8(0x55, ioaddr->nsect_addr);
1040 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 1041
0d5ff566
TH
1042 nsect = ioread8(ioaddr->nsect_addr);
1043 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
1044
1045 if ((nsect == 0x55) && (lbal == 0xaa))
1046 return 1; /* we found a device */
1047
1048 return 0; /* nothing found */
1049}
1050
1da177e4
LT
/**
 *	ata_dev_classify - determine device type based on ATA-spec signature
 *	@tf: ATA taskfile register set for device to be identified
 *
 *	Determine from taskfile register contents whether a device is
 *	ATA or ATAPI, as per "Signature and persistence" section
 *	of ATA/PI spec (volume 1, sect 5.14).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
 *	%ATA_DEV_UNKNOWN the event of failure.
 */
unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * So, we only check those.  It's sufficient for uniqueness.
	 *
	 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
	 * signatures for ATA and ATAPI devices attached on SerialATA,
	 * 0x3c/0xc3 and 0x69/0x96 respectively.  However, SerialATA
	 * spec has never mentioned about using different signatures
	 * for ATA/ATAPI devices.  Then, Serial ATA II: Port
	 * Multiplier specification began to use 0x69/0x96 to identify
	 * port multpliers and 0x3c/0xc3 to identify SEMB device.
	 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
	 * 0x69/0x96 shortly and described them as reserved for
	 * SerialATA.
	 *
	 * We follow the current spec and consider that 0x69/0x96
	 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
	 */
	if ((tf->lbam == 0) && (tf->lbah == 0)) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
		DPRINTK("found PMP device by sig\n");
		return ATA_DEV_PMP;
	}

	if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
		/* SEMB devices carry a valid signature but are not
		 * supported; report them as such */
		printk(KERN_INFO "ata: SEMB device ignored\n");
		return ATA_DEV_SEMB_UNSUP; /* not yet */
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
1109
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	/* snapshot the post-reset shadow registers; the error register
	 * shares storage with the feature field of the taskfile */
	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* diagnostics passed - do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* 0x81 on device 0: device 1 failed diags - do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		/* ATA signature but status reads back all-zero:
		 * treat as missing device */
		class = ATA_DEV_NONE;

	return class;
}
1178
1179/**
6a62a04d 1180 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1181 * @id: IDENTIFY DEVICE results we will examine
1182 * @s: string into which data is output
1183 * @ofs: offset into identify device page
1184 * @len: length of string to return. must be an even number.
1185 *
1186 * The strings in the IDENTIFY DEVICE page are broken up into
1187 * 16-bit chunks. Run through the string, and output each
1188 * 8-bit chunk linearly, regardless of platform.
1189 *
1190 * LOCKING:
1191 * caller.
1192 */
1193
6a62a04d
TH
1194void ata_id_string(const u16 *id, unsigned char *s,
1195 unsigned int ofs, unsigned int len)
1da177e4
LT
1196{
1197 unsigned int c;
1198
1199 while (len > 0) {
1200 c = id[ofs] >> 8;
1201 *s = c;
1202 s++;
1203
1204 c = id[ofs] & 0xff;
1205 *s = c;
1206 s++;
1207
1208 ofs++;
1209 len -= 2;
1210 }
1211}
1212
0e949ff3 1213/**
6a62a04d 1214 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1215 * @id: IDENTIFY DEVICE results we will examine
1216 * @s: string into which data is output
1217 * @ofs: offset into identify device page
1218 * @len: length of string to return. must be an odd number.
1219 *
6a62a04d 1220 * This function is identical to ata_id_string except that it
0e949ff3
TH
1221 * trims trailing spaces and terminates the resulting string with
1222 * null. @len must be actual maximum length (even number) + 1.
1223 *
1224 * LOCKING:
1225 * caller.
1226 */
6a62a04d
TH
1227void ata_id_c_string(const u16 *id, unsigned char *s,
1228 unsigned int ofs, unsigned int len)
0e949ff3
TH
1229{
1230 unsigned char *p;
1231
1232 WARN_ON(!(len & 1));
1233
6a62a04d 1234 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1235
1236 p = s + strnlen(s, len - 1);
1237 while (p > s && p[-1] == ' ')
1238 p--;
1239 *p = '\0';
1240}
0baab86b 1241
db6f8759
TH
1242static u64 ata_id_n_sectors(const u16 *id)
1243{
1244 if (ata_id_has_lba(id)) {
1245 if (ata_id_has_lba48(id))
1246 return ata_id_u64(id, 100);
1247 else
1248 return ata_id_u32(id, 60);
1249 } else {
1250 if (ata_id_current_chs_valid(id))
1251 return ata_id_u32(id, 57);
1252 else
1253 return id[1] * id[3] * id[6];
1254 }
1255}
1256
1e999736
AC
1257static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1258{
1259 u64 sectors = 0;
1260
1261 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1262 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1263 sectors |= (tf->hob_lbal & 0xff) << 24;
1264 sectors |= (tf->lbah & 0xff) << 16;
1265 sectors |= (tf->lbam & 0xff) << 8;
1266 sectors |= (tf->lbal & 0xff);
1267
1268 return ++sectors;
1269}
1270
1271static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1272{
1273 u64 sectors = 0;
1274
1275 sectors |= (tf->device & 0x0f) << 24;
1276 sectors |= (tf->lbah & 0xff) << 16;
1277 sectors |= (tf->lbam & 0xff) << 8;
1278 sectors |= (tf->lbal & 0xff);
1279
1280 return ++sectors;
1281}
1282
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	/* pick the EXT command variant for LBA48-capable drives */
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* drive-aborted command maps to -EACCES so callers can
		 * distinguish "refused" from "broken" */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result address comes back in the taskfile registers */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	/* some drives report native max + 1; quirk flag compensates */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1332
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the highest addressable LBA, i.e. count - 1 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* high-order bytes (24-47) go in the HOB registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 24-27 live in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	/* low 24 bits are common to both variants */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* ABORTED or IDNF from the drive means the request was
		 * refused rather than failed outright */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1389
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? only ATA disks with LBA and an enabled
	 * HPA, and not ones already marked HPA-broken */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  either no hidden area, or policy says leave
	 * the HPA in place -- just report what we found */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new size */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1485
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: placeholder for controllers that do not
	 * need (or do not support) explicit device selection */
}
1501
0baab86b 1502
1da177e4
LT
1503/**
1504 * ata_std_dev_select - Select device 0/1 on ATA bus
1505 * @ap: ATA channel to manipulate
1506 * @device: ATA device (numbered from zero) to select
1507 *
1508 * Use the method defined in the ATA specification to
1509 * make either device 0, or device 1, active on the
0baab86b
EF
1510 * ATA channel. Works with both PIO and MMIO.
1511 *
1512 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1513 *
1514 * LOCKING:
1515 * caller.
1516 */
1517
2dcb407e 1518void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1519{
1520 u8 tmp;
1521
1522 if (device == 0)
1523 tmp = ATA_DEVICE_OBS;
1524 else
1525 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1526
0d5ff566 1527 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1528 ata_pause(ap); /* needed; also flushes, for mmio */
1529}
1530
1531/**
1532 * ata_dev_select - Select device 0/1 on ATA bus
1533 * @ap: ATA channel to manipulate
1534 * @device: ATA device (numbered from zero) to select
1535 * @wait: non-zero to wait for Status register BSY bit to clear
1536 * @can_sleep: non-zero if context allows sleeping
1537 *
1538 * Use the method defined in the ATA specification to
1539 * make either device 0, or device 1, active on the
1540 * ATA channel.
1541 *
1542 * This is a high-level version of ata_std_dev_select(),
1543 * which additionally provides the services of inserting
1544 * the proper pauses and status polling, where needed.
1545 *
1546 * LOCKING:
1547 * caller.
1548 */
1549
1550void ata_dev_select(struct ata_port *ap, unsigned int device,
1551 unsigned int wait, unsigned int can_sleep)
1552{
88574551 1553 if (ata_msg_probe(ap))
44877b4e
TH
1554 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1555 "device %u, wait %u\n", device, wait);
1da177e4
LT
1556
1557 if (wait)
1558 ata_wait_idle(ap);
1559
1560 ap->ops->dev_select(ap, device);
1561
1562 if (wait) {
9af5c9c9 1563 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1564 msleep(150);
1565 ata_wait_idle(ap);
1566 }
1567}
1568
1569/**
1570 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1571 * @id: IDENTIFY DEVICE page to dump
1da177e4 1572 *
0bd3300a
TH
1573 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1574 * page.
1da177e4
LT
1575 *
1576 * LOCKING:
1577 * caller.
1578 */
1579
0bd3300a 1580static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1581{
1582 DPRINTK("49==0x%04x "
1583 "53==0x%04x "
1584 "63==0x%04x "
1585 "64==0x%04x "
1586 "75==0x%04x \n",
0bd3300a
TH
1587 id[49],
1588 id[53],
1589 id[63],
1590 id[64],
1591 id[75]);
1da177e4
LT
1592 DPRINTK("80==0x%04x "
1593 "81==0x%04x "
1594 "82==0x%04x "
1595 "83==0x%04x "
1596 "84==0x%04x \n",
0bd3300a
TH
1597 id[80],
1598 id[81],
1599 id[82],
1600 id[83],
1601 id[84]);
1da177e4
LT
1602 DPRINTK("88==0x%04x "
1603 "93==0x%04x\n",
0bd3300a
TH
1604 id[88],
1605 id[93]);
1da177e4
LT
1606}
1607
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 advertises PIO3/4; modes 0-2 are always set */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		/* CF advanced modes map onto PIO5/6 and MWDMA3/4 */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* UDMA modes (word 88) are only valid if word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1676
/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for the workqueue function to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule the port_task work for execution after @delay jiffies.
 *	There is one port_task per port and it's the user (low level
 *	driver)'s responsibility to make sure that only one task is
 *	active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_queue_task(struct ata_port *ap, void *data,
			       unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1704
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancels a pending run and waits out a running one */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1724
7102d230 1725static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1726{
77853bf2 1727 struct completion *waiting = qc->private_data;
a2a7a662 1728
a2a7a662 1729 complete(waiting);
a2a7a662
TH
1730}
1731
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the in-flight command state so the internal
	 * command runs alone; restored below after completion */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of all sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the generic OTHER bit once a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the command state saved before issuing */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1913
2432697b 1914/**
33480a0e 1915 * ata_exec_internal - execute libata internal command
2432697b
TH
1916 * @dev: Device to which the command is sent
1917 * @tf: Taskfile registers for the command and the result
1918 * @cdb: CDB for packet command
1919 * @dma_dir: Data tranfer direction of the command
1920 * @buf: Data buffer of the command
1921 * @buflen: Length of data buffer
2b789108 1922 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1923 *
1924 * Wrapper around ata_exec_internal_sg() which takes simple
1925 * buffer instead of sg list.
1926 *
1927 * LOCKING:
1928 * None. Should be called with kernel context, might sleep.
1929 *
1930 * RETURNS:
1931 * Zero on success, AC_ERR_* mask on failure
1932 */
1933unsigned ata_exec_internal(struct ata_device *dev,
1934 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1935 int dma_dir, void *buf, unsigned int buflen,
1936 unsigned long timeout)
2432697b 1937{
33480a0e
TH
1938 struct scatterlist *psg = NULL, sg;
1939 unsigned int n_elem = 0;
2432697b 1940
33480a0e
TH
1941 if (dma_dir != DMA_NONE) {
1942 WARN_ON(!buf);
1943 sg_init_one(&sg, buf, buflen);
1944 psg = &sg;
1945 n_elem++;
1946 }
2432697b 1947
2b789108
TH
1948 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1949 timeout);
2432697b
TH
1950}
1951
977e6b9f
TH
1952/**
1953 * ata_do_simple_cmd - execute simple internal command
1954 * @dev: Device to which the command is sent
1955 * @cmd: Opcode to execute
1956 *
1957 * Execute a 'simple' command, that only consists of the opcode
1958 * 'cmd' itself, without filling any other registers
1959 *
1960 * LOCKING:
1961 * Kernel thread context (may sleep).
1962 *
1963 * RETURNS:
1964 * Zero on success, AC_ERR_* mask on failure
e58eb583 1965 */
77b08fb5 1966unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1967{
1968 struct ata_taskfile tf;
e58eb583
TH
1969
1970 ata_tf_init(dev, &tf);
1971
1972 tf.command = cmd;
1973 tf.flags |= ATA_TFLAG_DEVICE;
1974 tf.protocol = ATA_PROT_NODATA;
1975
2b789108 1976 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1977}
1978
1bc4ccff
AC
1979/**
1980 * ata_pio_need_iordy - check if iordy needed
1981 * @adev: ATA device
1982 *
1983 * Check if the current speed of the device requires IORDY. Used
1984 * by various controllers for chip configuration.
1985 */
a617c09f 1986
1bc4ccff
AC
1987unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1988{
432729f0
AC
1989 /* Controller doesn't support IORDY. Probably a pointless check
1990 as the caller should know this */
9af5c9c9 1991 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1992 return 0;
432729f0
AC
1993 /* PIO3 and higher it is mandatory */
1994 if (adev->pio_mode > XFER_PIO_2)
1995 return 1;
1996 /* We turn it on when possible */
1997 if (ata_id_has_iordy(adev->id))
1bc4ccff 1998 return 1;
432729f0
AC
1999 return 0;
2000}
2e9edbf8 2001
432729f0
AC
2002/**
2003 * ata_pio_mask_no_iordy - Return the non IORDY mask
2004 * @adev: ATA device
2005 *
2006 * Compute the highest mode possible if we are not using iordy. Return
2007 * -1 if no iordy mode is available.
2008 */
a617c09f 2009
432729f0
AC
2010static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2011{
1bc4ccff 2012 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 2013 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 2014 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
2015 /* Is the speed faster than the drive allows non IORDY ? */
2016 if (pio) {
2017 /* This is cycle times not frequency - watch the logic! */
2018 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
2019 return 3 << ATA_SHIFT_PIO;
2020 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
2021 }
2022 }
432729f0 2023 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
2024}
2025
1da177e4 2026/**
49016aca 2027 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
2028 * @dev: target device
2029 * @p_class: pointer to class of the target device (may be changed)
bff04647 2030 * @flags: ATA_READID_* flags
fe635c7e 2031 * @id: buffer to read IDENTIFY data into
1da177e4 2032 *
49016aca
TH
2033 * Read ID data from the specified device. ATA_CMD_ID_ATA is
2034 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
2035 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
2036 * for pre-ATA4 drives.
1da177e4 2037 *
50a99018 2038 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 2039 * now we abort if we hit that case.
50a99018 2040 *
1da177e4 2041 * LOCKING:
49016aca
TH
2042 * Kernel thread context (may sleep)
2043 *
2044 * RETURNS:
2045 * 0 on success, -errno otherwise.
1da177e4 2046 */
a9beec95 2047int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 2048 unsigned int flags, u16 *id)
1da177e4 2049{
9af5c9c9 2050 struct ata_port *ap = dev->link->ap;
49016aca 2051 unsigned int class = *p_class;
a0123703 2052 struct ata_taskfile tf;
49016aca
TH
2053 unsigned int err_mask = 0;
2054 const char *reason;
54936f8b 2055 int may_fallback = 1, tried_spinup = 0;
49016aca 2056 int rc;
1da177e4 2057
0dd4b21f 2058 if (ata_msg_ctl(ap))
44877b4e 2059 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 2060
49016aca 2061 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 2062 retry:
3373efd8 2063 ata_tf_init(dev, &tf);
a0123703 2064
49016aca
TH
2065 switch (class) {
2066 case ATA_DEV_ATA:
a0123703 2067 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
2068 break;
2069 case ATA_DEV_ATAPI:
a0123703 2070 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
2071 break;
2072 default:
2073 rc = -ENODEV;
2074 reason = "unsupported class";
2075 goto err_out;
1da177e4
LT
2076 }
2077
a0123703 2078 tf.protocol = ATA_PROT_PIO;
81afe893
TH
2079
2080 /* Some devices choke if TF registers contain garbage. Make
2081 * sure those are properly initialized.
2082 */
2083 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2084
2085 /* Device presence detection is unreliable on some
2086 * controllers. Always poll IDENTIFY if available.
2087 */
2088 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 2089
3373efd8 2090 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 2091 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 2092 if (err_mask) {
800b3996 2093 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 2094 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 2095 ap->print_id, dev->devno);
55a8e2c8
TH
2096 return -ENOENT;
2097 }
2098
54936f8b
TH
2099 /* Device or controller might have reported the wrong
2100 * device class. Give a shot at the other IDENTIFY if
2101 * the current one is aborted by the device.
2102 */
2103 if (may_fallback &&
2104 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2105 may_fallback = 0;
2106
2107 if (class == ATA_DEV_ATA)
2108 class = ATA_DEV_ATAPI;
2109 else
2110 class = ATA_DEV_ATA;
2111 goto retry;
2112 }
2113
49016aca
TH
2114 rc = -EIO;
2115 reason = "I/O error";
1da177e4
LT
2116 goto err_out;
2117 }
2118
54936f8b
TH
2119 /* Falling back doesn't make sense if ID data was read
2120 * successfully at least once.
2121 */
2122 may_fallback = 0;
2123
49016aca 2124 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 2125
49016aca 2126 /* sanity check */
a4f5749b 2127 rc = -EINVAL;
6070068b 2128 reason = "device reports invalid type";
a4f5749b
TH
2129
2130 if (class == ATA_DEV_ATA) {
2131 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2132 goto err_out;
2133 } else {
2134 if (ata_id_is_ata(id))
2135 goto err_out;
49016aca
TH
2136 }
2137
169439c2
ML
2138 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2139 tried_spinup = 1;
2140 /*
2141 * Drive powered-up in standby mode, and requires a specific
2142 * SET_FEATURES spin-up subcommand before it will accept
2143 * anything other than the original IDENTIFY command.
2144 */
218f3d30 2145 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 2146 if (err_mask && id[2] != 0x738c) {
169439c2
ML
2147 rc = -EIO;
2148 reason = "SPINUP failed";
2149 goto err_out;
2150 }
2151 /*
2152 * If the drive initially returned incomplete IDENTIFY info,
2153 * we now must reissue the IDENTIFY command.
2154 */
2155 if (id[2] == 0x37c8)
2156 goto retry;
2157 }
2158
bff04647 2159 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
2160 /*
2161 * The exact sequence expected by certain pre-ATA4 drives is:
2162 * SRST RESET
50a99018
AC
2163 * IDENTIFY (optional in early ATA)
2164 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2165 * anything else..
2166 * Some drives were very specific about that exact sequence.
50a99018
AC
2167 *
2168 * Note that ATA4 says lba is mandatory so the second check
2169 * shoud never trigger.
49016aca
TH
2170 */
2171 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2172 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2173 if (err_mask) {
2174 rc = -EIO;
2175 reason = "INIT_DEV_PARAMS failed";
2176 goto err_out;
2177 }
2178
2179 /* current CHS translation info (id[53-58]) might be
2180 * changed. reread the identify device info.
2181 */
bff04647 2182 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2183 goto retry;
2184 }
2185 }
2186
2187 *p_class = class;
fe635c7e 2188
49016aca
TH
2189 return 0;
2190
2191 err_out:
88574551 2192 if (ata_msg_warn(ap))
0dd4b21f 2193 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2194 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2195 return rc;
2196}
2197
3373efd8 2198static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2199{
9af5c9c9
TH
2200 struct ata_port *ap = dev->link->ap;
2201 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2202}
2203
a6e6ce8e
TH
2204static void ata_dev_config_ncq(struct ata_device *dev,
2205 char *desc, size_t desc_sz)
2206{
9af5c9c9 2207 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2208 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2209
2210 if (!ata_id_has_ncq(dev->id)) {
2211 desc[0] = '\0';
2212 return;
2213 }
75683fe7 2214 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2215 snprintf(desc, desc_sz, "NCQ (not used)");
2216 return;
2217 }
a6e6ce8e 2218 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2219 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2220 dev->flags |= ATA_DFLAG_NCQ;
2221 }
2222
2223 if (hdepth >= ddepth)
2224 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2225 else
2226 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2227}
2228
49016aca 2229/**
ffeae418 2230 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2231 * @dev: Target device to configure
2232 *
2233 * Configure @dev according to @dev->id. Generic and low-level
2234 * driver specific fixups are also applied.
49016aca
TH
2235 *
2236 * LOCKING:
ffeae418
TH
2237 * Kernel thread context (may sleep)
2238 *
2239 * RETURNS:
2240 * 0 on success, -errno otherwise
49016aca 2241 */
efdaedc4 2242int ata_dev_configure(struct ata_device *dev)
49016aca 2243{
9af5c9c9
TH
2244 struct ata_port *ap = dev->link->ap;
2245 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2246 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2247 const u16 *id = dev->id;
7dc951ae 2248 unsigned long xfer_mask;
b352e57d 2249 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2250 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2251 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2252 int rc;
49016aca 2253
0dd4b21f 2254 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
2255 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2256 __FUNCTION__);
ffeae418 2257 return 0;
49016aca
TH
2258 }
2259
0dd4b21f 2260 if (ata_msg_probe(ap))
44877b4e 2261 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 2262
75683fe7
TH
2263 /* set horkage */
2264 dev->horkage |= ata_dev_blacklisted(dev);
33267325 2265 ata_force_horkage(dev);
75683fe7 2266
6746544c
TH
2267 /* let ACPI work its magic */
2268 rc = ata_acpi_on_devcfg(dev);
2269 if (rc)
2270 return rc;
08573a86 2271
05027adc
TH
2272 /* massage HPA, do it early as it might change IDENTIFY data */
2273 rc = ata_hpa_resize(dev);
2274 if (rc)
2275 return rc;
2276
c39f5ebe 2277 /* print device capabilities */
0dd4b21f 2278 if (ata_msg_probe(ap))
88574551
TH
2279 ata_dev_printk(dev, KERN_DEBUG,
2280 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2281 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 2282 __FUNCTION__,
f15a1daf
TH
2283 id[49], id[82], id[83], id[84],
2284 id[85], id[86], id[87], id[88]);
c39f5ebe 2285
208a9933 2286 /* initialize to-be-configured parameters */
ea1dd4e1 2287 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2288 dev->max_sectors = 0;
2289 dev->cdb_len = 0;
2290 dev->n_sectors = 0;
2291 dev->cylinders = 0;
2292 dev->heads = 0;
2293 dev->sectors = 0;
2294
1da177e4
LT
2295 /*
2296 * common ATA, ATAPI feature tests
2297 */
2298
ff8854b2 2299 /* find max transfer mode; for printk only */
1148c3a7 2300 xfer_mask = ata_id_xfermask(id);
1da177e4 2301
0dd4b21f
BP
2302 if (ata_msg_probe(ap))
2303 ata_dump_id(id);
1da177e4 2304
ef143d57
AL
2305 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2306 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2307 sizeof(fwrevbuf));
2308
2309 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2310 sizeof(modelbuf));
2311
1da177e4
LT
2312 /* ATA-specific feature tests */
2313 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
2314 if (ata_id_is_cfa(id)) {
2315 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
2316 ata_dev_printk(dev, KERN_WARNING,
2317 "supports DRM functions and may "
2318 "not be fully accessable.\n");
b352e57d 2319 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2320 } else {
2dcb407e 2321 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2322 /* Warn the user if the device has TPM extensions */
2323 if (ata_id_has_tpm(id))
2324 ata_dev_printk(dev, KERN_WARNING,
2325 "supports DRM functions and may "
2326 "not be fully accessable.\n");
2327 }
b352e57d 2328
1148c3a7 2329 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2330
3f64f565
EM
2331 if (dev->id[59] & 0x100)
2332 dev->multi_count = dev->id[59] & 0xff;
2333
1148c3a7 2334 if (ata_id_has_lba(id)) {
4c2d721a 2335 const char *lba_desc;
a6e6ce8e 2336 char ncq_desc[20];
8bf62ece 2337
4c2d721a
TH
2338 lba_desc = "LBA";
2339 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2340 if (ata_id_has_lba48(id)) {
8bf62ece 2341 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2342 lba_desc = "LBA48";
6fc49adb
TH
2343
2344 if (dev->n_sectors >= (1UL << 28) &&
2345 ata_id_has_flush_ext(id))
2346 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2347 }
8bf62ece 2348
a6e6ce8e
TH
2349 /* config NCQ */
2350 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2351
8bf62ece 2352 /* print device info to dmesg */
3f64f565
EM
2353 if (ata_msg_drv(ap) && print_info) {
2354 ata_dev_printk(dev, KERN_INFO,
2355 "%s: %s, %s, max %s\n",
2356 revbuf, modelbuf, fwrevbuf,
2357 ata_mode_string(xfer_mask));
2358 ata_dev_printk(dev, KERN_INFO,
2359 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 2360 (unsigned long long)dev->n_sectors,
3f64f565
EM
2361 dev->multi_count, lba_desc, ncq_desc);
2362 }
ffeae418 2363 } else {
8bf62ece
AL
2364 /* CHS */
2365
2366 /* Default translation */
1148c3a7
TH
2367 dev->cylinders = id[1];
2368 dev->heads = id[3];
2369 dev->sectors = id[6];
8bf62ece 2370
1148c3a7 2371 if (ata_id_current_chs_valid(id)) {
8bf62ece 2372 /* Current CHS translation is valid. */
1148c3a7
TH
2373 dev->cylinders = id[54];
2374 dev->heads = id[55];
2375 dev->sectors = id[56];
8bf62ece
AL
2376 }
2377
2378 /* print device info to dmesg */
3f64f565 2379 if (ata_msg_drv(ap) && print_info) {
88574551 2380 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2381 "%s: %s, %s, max %s\n",
2382 revbuf, modelbuf, fwrevbuf,
2383 ata_mode_string(xfer_mask));
a84471fe 2384 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2385 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2386 (unsigned long long)dev->n_sectors,
2387 dev->multi_count, dev->cylinders,
2388 dev->heads, dev->sectors);
2389 }
07f6f7d0
AL
2390 }
2391
6e7846e9 2392 dev->cdb_len = 16;
1da177e4
LT
2393 }
2394
2395 /* ATAPI-specific feature tests */
2c13b7ce 2396 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2397 const char *cdb_intr_string = "";
2398 const char *atapi_an_string = "";
7d77b247 2399 u32 sntf;
08a556db 2400
1148c3a7 2401 rc = atapi_cdb_len(id);
1da177e4 2402 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2403 if (ata_msg_warn(ap))
88574551
TH
2404 ata_dev_printk(dev, KERN_WARNING,
2405 "unsupported CDB len\n");
ffeae418 2406 rc = -EINVAL;
1da177e4
LT
2407 goto err_out_nosup;
2408 }
6e7846e9 2409 dev->cdb_len = (unsigned int) rc;
1da177e4 2410
7d77b247
TH
2411 /* Enable ATAPI AN if both the host and device have
2412 * the support. If PMP is attached, SNTF is required
2413 * to enable ATAPI AN to discern between PHY status
2414 * changed notifications and ATAPI ANs.
9f45cbd3 2415 */
7d77b247
TH
2416 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2417 (!ap->nr_pmp_links ||
2418 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2419 unsigned int err_mask;
2420
9f45cbd3 2421 /* issue SET feature command to turn this on */
218f3d30
JG
2422 err_mask = ata_dev_set_feature(dev,
2423 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2424 if (err_mask)
9f45cbd3 2425 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2426 "failed to enable ATAPI AN "
2427 "(err_mask=0x%x)\n", err_mask);
2428 else {
9f45cbd3 2429 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2430 atapi_an_string = ", ATAPI AN";
2431 }
9f45cbd3
KCA
2432 }
2433
08a556db 2434 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2435 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2436 cdb_intr_string = ", CDB intr";
2437 }
312f7da2 2438
1da177e4 2439 /* print device info to dmesg */
5afc8142 2440 if (ata_msg_drv(ap) && print_info)
ef143d57 2441 ata_dev_printk(dev, KERN_INFO,
854c73a2 2442 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2443 modelbuf, fwrevbuf,
12436c30 2444 ata_mode_string(xfer_mask),
854c73a2 2445 cdb_intr_string, atapi_an_string);
1da177e4
LT
2446 }
2447
914ed354
TH
2448 /* determine max_sectors */
2449 dev->max_sectors = ATA_MAX_SECTORS;
2450 if (dev->flags & ATA_DFLAG_LBA48)
2451 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2452
ca77329f
KCA
2453 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2454 if (ata_id_has_hipm(dev->id))
2455 dev->flags |= ATA_DFLAG_HIPM;
2456 if (ata_id_has_dipm(dev->id))
2457 dev->flags |= ATA_DFLAG_DIPM;
2458 }
2459
c5038fc0
AC
2460 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2461 200 sectors */
3373efd8 2462 if (ata_dev_knobble(dev)) {
5afc8142 2463 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2464 ata_dev_printk(dev, KERN_INFO,
2465 "applying bridge limits\n");
5a529139 2466 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2467 dev->max_sectors = ATA_MAX_SECTORS;
2468 }
2469
f8d8e579 2470 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2471 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2472 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2473 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2474 }
f8d8e579 2475
75683fe7 2476 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2477 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2478 dev->max_sectors);
18d6e9d5 2479
ca77329f
KCA
2480 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2481 dev->horkage |= ATA_HORKAGE_IPM;
2482
2483 /* reset link pm_policy for this port to no pm */
2484 ap->pm_policy = MAX_PERFORMANCE;
2485 }
2486
4b2f3ede 2487 if (ap->ops->dev_config)
cd0d3bbc 2488 ap->ops->dev_config(dev);
4b2f3ede 2489
c5038fc0
AC
2490 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2491 /* Let the user know. We don't want to disallow opens for
2492 rescue purposes, or in case the vendor is just a blithering
2493 idiot. Do this after the dev_config call as some controllers
2494 with buggy firmware may want to avoid reporting false device
2495 bugs */
2496
2497 if (print_info) {
2498 ata_dev_printk(dev, KERN_WARNING,
2499"Drive reports diagnostics failure. This may indicate a drive\n");
2500 ata_dev_printk(dev, KERN_WARNING,
2501"fault or invalid emulation. Contact drive vendor for information.\n");
2502 }
2503 }
2504
0dd4b21f
BP
2505 if (ata_msg_probe(ap))
2506 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2507 __FUNCTION__, ata_chk_status(ap));
ffeae418 2508 return 0;
1da177e4
LT
2509
2510err_out_nosup:
0dd4b21f 2511 if (ata_msg_probe(ap))
88574551
TH
2512 ata_dev_printk(dev, KERN_DEBUG,
2513 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2514 return rc;
1da177e4
LT
2515}
2516
be0d18df 2517/**
2e41e8e6 2518 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2519 * @ap: port
2520 *
2e41e8e6 2521 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2522 * detection.
2523 */
2524
2525int ata_cable_40wire(struct ata_port *ap)
2526{
2527 return ATA_CBL_PATA40;
2528}
2529
2530/**
2e41e8e6 2531 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2532 * @ap: port
2533 *
2e41e8e6 2534 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2535 * detection.
2536 */
2537
2538int ata_cable_80wire(struct ata_port *ap)
2539{
2540 return ATA_CBL_PATA80;
2541}
2542
2543/**
2544 * ata_cable_unknown - return unknown PATA cable.
2545 * @ap: port
2546 *
2547 * Helper method for drivers which have no PATA cable detection.
2548 */
2549
2550int ata_cable_unknown(struct ata_port *ap)
2551{
2552 return ATA_CBL_PATA_UNK;
2553}
2554
c88f90c3
TH
2555/**
2556 * ata_cable_ignore - return ignored PATA cable.
2557 * @ap: port
2558 *
2559 * Helper method for drivers which don't use cable type to limit
2560 * transfer mode.
2561 */
2562int ata_cable_ignore(struct ata_port *ap)
2563{
2564 return ATA_CBL_PATA_IGN;
2565}
2566
be0d18df
AC
2567/**
2568 * ata_cable_sata - return SATA cable type
2569 * @ap: port
2570 *
2571 * Helper method for drivers which have SATA cables
2572 */
2573
2574int ata_cable_sata(struct ata_port *ap)
2575{
2576 return ATA_CBL_SATA;
2577}
2578
1da177e4
LT
2579/**
2580 * ata_bus_probe - Reset and probe ATA bus
2581 * @ap: Bus to probe
2582 *
0cba632b
JG
2583 * Master ATA bus probing function. Initiates a hardware-dependent
2584 * bus reset, then attempts to identify any devices found on
2585 * the bus.
2586 *
1da177e4 2587 * LOCKING:
0cba632b 2588 * PCI/etc. bus probe sem.
1da177e4
LT
2589 *
2590 * RETURNS:
96072e69 2591 * Zero on success, negative errno otherwise.
1da177e4
LT
2592 */
2593
80289167 2594int ata_bus_probe(struct ata_port *ap)
1da177e4 2595{
28ca5c57 2596 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2597 int tries[ATA_MAX_DEVICES];
f58229f8 2598 int rc;
e82cbdb9 2599 struct ata_device *dev;
1da177e4 2600
28ca5c57 2601 ata_port_probe(ap);
c19ba8af 2602
f58229f8
TH
2603 ata_link_for_each_dev(dev, &ap->link)
2604 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2605
2606 retry:
cdeab114
TH
2607 ata_link_for_each_dev(dev, &ap->link) {
2608 /* If we issue an SRST then an ATA drive (not ATAPI)
2609 * may change configuration and be in PIO0 timing. If
2610 * we do a hard reset (or are coming from power on)
2611 * this is true for ATA or ATAPI. Until we've set a
2612 * suitable controller mode we should not touch the
2613 * bus as we may be talking too fast.
2614 */
2615 dev->pio_mode = XFER_PIO_0;
2616
2617 /* If the controller has a pio mode setup function
2618 * then use it to set the chipset to rights. Don't
2619 * touch the DMA setup as that will be dealt with when
2620 * configuring devices.
2621 */
2622 if (ap->ops->set_piomode)
2623 ap->ops->set_piomode(ap, dev);
2624 }
2625
2044470c 2626 /* reset and determine device classes */
52783c5d 2627 ap->ops->phy_reset(ap);
2061a47a 2628
f58229f8 2629 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2630 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2631 dev->class != ATA_DEV_UNKNOWN)
2632 classes[dev->devno] = dev->class;
2633 else
2634 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2635
52783c5d 2636 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2637 }
1da177e4 2638
52783c5d 2639 ata_port_probe(ap);
2044470c 2640
f31f0cc2
JG
2641 /* read IDENTIFY page and configure devices. We have to do the identify
2642 specific sequence bass-ackwards so that PDIAG- is released by
2643 the slave device */
2644
f58229f8
TH
2645 ata_link_for_each_dev(dev, &ap->link) {
2646 if (tries[dev->devno])
2647 dev->class = classes[dev->devno];
ffeae418 2648
14d2bac1 2649 if (!ata_dev_enabled(dev))
ffeae418 2650 continue;
ffeae418 2651
bff04647
TH
2652 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2653 dev->id);
14d2bac1
TH
2654 if (rc)
2655 goto fail;
f31f0cc2
JG
2656 }
2657
be0d18df
AC
2658 /* Now ask for the cable type as PDIAG- should have been released */
2659 if (ap->ops->cable_detect)
2660 ap->cbl = ap->ops->cable_detect(ap);
2661
614fe29b
AC
2662 /* We may have SATA bridge glue hiding here irrespective of the
2663 reported cable types and sensed types */
2664 ata_link_for_each_dev(dev, &ap->link) {
2665 if (!ata_dev_enabled(dev))
2666 continue;
2667 /* SATA drives indicate we have a bridge. We don't know which
2668 end of the link the bridge is which is a problem */
2669 if (ata_id_is_sata(dev->id))
2670 ap->cbl = ATA_CBL_SATA;
2671 }
2672
f31f0cc2
JG
2673 /* After the identify sequence we can now set up the devices. We do
2674 this in the normal order so that the user doesn't get confused */
2675
f58229f8 2676 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2677 if (!ata_dev_enabled(dev))
2678 continue;
14d2bac1 2679
9af5c9c9 2680 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2681 rc = ata_dev_configure(dev);
9af5c9c9 2682 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2683 if (rc)
2684 goto fail;
1da177e4
LT
2685 }
2686
e82cbdb9 2687 /* configure transfer mode */
0260731f 2688 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2689 if (rc)
51713d35 2690 goto fail;
1da177e4 2691
f58229f8
TH
2692 ata_link_for_each_dev(dev, &ap->link)
2693 if (ata_dev_enabled(dev))
e82cbdb9 2694 return 0;
1da177e4 2695
e82cbdb9
TH
2696 /* no device present, disable port */
2697 ata_port_disable(ap);
96072e69 2698 return -ENODEV;
14d2bac1
TH
2699
2700 fail:
4ae72a1e
TH
2701 tries[dev->devno]--;
2702
14d2bac1
TH
2703 switch (rc) {
2704 case -EINVAL:
4ae72a1e 2705 /* eeek, something went very wrong, give up */
14d2bac1
TH
2706 tries[dev->devno] = 0;
2707 break;
4ae72a1e
TH
2708
2709 case -ENODEV:
2710 /* give it just one more chance */
2711 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2712 case -EIO:
4ae72a1e
TH
2713 if (tries[dev->devno] == 1) {
2714 /* This is the last chance, better to slow
2715 * down than lose it.
2716 */
936fd732 2717 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2718 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2719 }
14d2bac1
TH
2720 }
2721
4ae72a1e 2722 if (!tries[dev->devno])
3373efd8 2723 ata_dev_disable(dev);
ec573755 2724
14d2bac1 2725 goto retry;
1da177e4
LT
2726}
2727
2728/**
0cba632b
JG
2729 * ata_port_probe - Mark port as enabled
2730 * @ap: Port for which we indicate enablement
1da177e4 2731 *
0cba632b
JG
2732 * Modify @ap data structure such that the system
2733 * thinks that the entire port is enabled.
2734 *
cca3974e 2735 * LOCKING: host lock, or some other form of
0cba632b 2736 * serialization.
1da177e4
LT
2737 */
2738
2739void ata_port_probe(struct ata_port *ap)
2740{
198e0fed 2741 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2742}
2743
3be680b7
TH
2744/**
2745 * sata_print_link_status - Print SATA link status
936fd732 2746 * @link: SATA link to printk link status about
3be680b7
TH
2747 *
2748 * This function prints link speed and status of a SATA link.
2749 *
2750 * LOCKING:
2751 * None.
2752 */
936fd732 2753void sata_print_link_status(struct ata_link *link)
3be680b7 2754{
6d5f9732 2755 u32 sstatus, scontrol, tmp;
3be680b7 2756
936fd732 2757 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2758 return;
936fd732 2759 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2760
936fd732 2761 if (ata_link_online(link)) {
3be680b7 2762 tmp = (sstatus >> 4) & 0xf;
936fd732 2763 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2764 "SATA link up %s (SStatus %X SControl %X)\n",
2765 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2766 } else {
936fd732 2767 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2768 "SATA link down (SStatus %X SControl %X)\n",
2769 sstatus, scontrol);
3be680b7
TH
2770 }
2771}
2772
ebdfca6e
AC
2773/**
2774 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2775 * @adev: device
2776 *
2777 * Obtain the other device on the same cable, or if none is
2778 * present NULL is returned
2779 */
2e9edbf8 2780
3373efd8 2781struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2782{
9af5c9c9
TH
2783 struct ata_link *link = adev->link;
2784 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2785 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2786 return NULL;
2787 return pair;
2788}
2789
1da177e4 2790/**
780a87f7
JG
2791 * ata_port_disable - Disable port.
2792 * @ap: Port to be disabled.
1da177e4 2793 *
780a87f7
JG
2794 * Modify @ap data structure such that the system
2795 * thinks that the entire port is disabled, and should
2796 * never attempt to probe or communicate with devices
2797 * on this port.
2798 *
cca3974e 2799 * LOCKING: host lock, or some other form of
780a87f7 2800 * serialization.
1da177e4
LT
2801 */
2802
2803void ata_port_disable(struct ata_port *ap)
2804{
9af5c9c9
TH
2805 ap->link.device[0].class = ATA_DEV_NONE;
2806 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2807 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2808}
2809
1c3fae4d 2810/**
3c567b7d 2811 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2812 * @link: Link to adjust SATA spd limit for
1c3fae4d 2813 *
936fd732 2814 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2815 * function only adjusts the limit. The change must be applied
3c567b7d 2816 * using sata_set_spd().
1c3fae4d
TH
2817 *
2818 * LOCKING:
2819 * Inherited from caller.
2820 *
2821 * RETURNS:
2822 * 0 on success, negative errno on failure
2823 */
936fd732 2824int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2825{
81952c54
TH
2826 u32 sstatus, spd, mask;
2827 int rc, highbit;
1c3fae4d 2828
936fd732 2829 if (!sata_scr_valid(link))
008a7896
TH
2830 return -EOPNOTSUPP;
2831
2832 /* If SCR can be read, use it to determine the current SPD.
936fd732 2833 * If not, use cached value in link->sata_spd.
008a7896 2834 */
936fd732 2835 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2836 if (rc == 0)
2837 spd = (sstatus >> 4) & 0xf;
2838 else
936fd732 2839 spd = link->sata_spd;
1c3fae4d 2840
936fd732 2841 mask = link->sata_spd_limit;
1c3fae4d
TH
2842 if (mask <= 1)
2843 return -EINVAL;
008a7896
TH
2844
2845 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2846 highbit = fls(mask) - 1;
2847 mask &= ~(1 << highbit);
2848
008a7896
TH
2849 /* Mask off all speeds higher than or equal to the current
2850 * one. Force 1.5Gbps if current SPD is not available.
2851 */
2852 if (spd > 1)
2853 mask &= (1 << (spd - 1)) - 1;
2854 else
2855 mask &= 1;
2856
2857 /* were we already at the bottom? */
1c3fae4d
TH
2858 if (!mask)
2859 return -EINVAL;
2860
936fd732 2861 link->sata_spd_limit = mask;
1c3fae4d 2862
936fd732 2863 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2864 sata_spd_string(fls(mask)));
1c3fae4d
TH
2865
2866 return 0;
2867}
2868
936fd732 2869static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2870{
5270222f
TH
2871 struct ata_link *host_link = &link->ap->link;
2872 u32 limit, target, spd;
1c3fae4d 2873
5270222f
TH
2874 limit = link->sata_spd_limit;
2875
2876 /* Don't configure downstream link faster than upstream link.
2877 * It doesn't speed up anything and some PMPs choke on such
2878 * configuration.
2879 */
2880 if (!ata_is_host_link(link) && host_link->sata_spd)
2881 limit &= (1 << host_link->sata_spd) - 1;
2882
2883 if (limit == UINT_MAX)
2884 target = 0;
1c3fae4d 2885 else
5270222f 2886 target = fls(limit);
1c3fae4d
TH
2887
2888 spd = (*scontrol >> 4) & 0xf;
5270222f 2889 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2890
5270222f 2891 return spd != target;
1c3fae4d
TH
2892}
2893
2894/**
3c567b7d 2895 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2896 * @link: Link in question
1c3fae4d
TH
2897 *
2898 * Test whether the spd limit in SControl matches
936fd732 2899 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2900 * whether hardreset is necessary to apply SATA spd
2901 * configuration.
2902 *
2903 * LOCKING:
2904 * Inherited from caller.
2905 *
2906 * RETURNS:
2907 * 1 if SATA spd configuration is needed, 0 otherwise.
2908 */
936fd732 2909int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2910{
2911 u32 scontrol;
2912
936fd732 2913 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2914 return 1;
1c3fae4d 2915
936fd732 2916 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2917}
2918
2919/**
3c567b7d 2920 * sata_set_spd - set SATA spd according to spd limit
936fd732 2921 * @link: Link to set SATA spd for
1c3fae4d 2922 *
936fd732 2923 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2924 *
2925 * LOCKING:
2926 * Inherited from caller.
2927 *
2928 * RETURNS:
2929 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2930 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2931 */
936fd732 2932int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2933{
2934 u32 scontrol;
81952c54 2935 int rc;
1c3fae4d 2936
936fd732 2937 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2938 return rc;
1c3fae4d 2939
936fd732 2940 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2941 return 0;
2942
936fd732 2943 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2944 return rc;
2945
1c3fae4d
TH
2946 return 1;
2947}
2948
452503f9
AC
2949/*
2950 * This mode timing computation functionality is ported over from
2951 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2952 */
2953/*
b352e57d 2954 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2955 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2956 * for UDMA6, which is currently supported only by Maxtor drives.
2957 *
2958 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2959 */
2960
2961static const struct ata_timing ata_timing[] = {
70cd071e
TH
2962/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2963 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2964 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2965 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2966 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2967 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2968 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2969 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
452503f9 2970
70cd071e
TH
2971 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2972 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2973 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
452503f9 2974
70cd071e
TH
2975 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2976 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2977 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
b352e57d 2978 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
70cd071e 2979 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
452503f9
AC
2980
2981/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
70cd071e
TH
2982 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2983 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2984 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2985 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2986 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2987 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2988 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
452503f9
AC
2989
2990 { 0xFF }
2991};
2992
2dcb407e
JG
2993#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2994#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
452503f9
AC
2995
2996static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2997{
2998 q->setup = EZ(t->setup * 1000, T);
2999 q->act8b = EZ(t->act8b * 1000, T);
3000 q->rec8b = EZ(t->rec8b * 1000, T);
3001 q->cyc8b = EZ(t->cyc8b * 1000, T);
3002 q->active = EZ(t->active * 1000, T);
3003 q->recover = EZ(t->recover * 1000, T);
3004 q->cycle = EZ(t->cycle * 1000, T);
3005 q->udma = EZ(t->udma * 1000, UT);
3006}
3007
3008void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
3009 struct ata_timing *m, unsigned int what)
3010{
3011 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
3012 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
3013 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
3014 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
3015 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
3016 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
3017 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
3018 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
3019}
3020
6357357c 3021const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 3022{
70cd071e
TH
3023 const struct ata_timing *t = ata_timing;
3024
3025 while (xfer_mode > t->mode)
3026 t++;
452503f9 3027
70cd071e
TH
3028 if (xfer_mode == t->mode)
3029 return t;
3030 return NULL;
452503f9
AC
3031}
3032
3033int ata_timing_compute(struct ata_device *adev, unsigned short speed,
3034 struct ata_timing *t, int T, int UT)
3035{
3036 const struct ata_timing *s;
3037 struct ata_timing p;
3038
3039 /*
2e9edbf8 3040 * Find the mode.
75b1f2f8 3041 */
452503f9
AC
3042
3043 if (!(s = ata_timing_find_mode(speed)))
3044 return -EINVAL;
3045
75b1f2f8
AL
3046 memcpy(t, s, sizeof(*s));
3047
452503f9
AC
3048 /*
3049 * If the drive is an EIDE drive, it can tell us it needs extended
3050 * PIO/MW_DMA cycle timing.
3051 */
3052
3053 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
3054 memset(&p, 0, sizeof(p));
2dcb407e 3055 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
452503f9
AC
3056 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
3057 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2dcb407e 3058 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
452503f9
AC
3059 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
3060 }
3061 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
3062 }
3063
3064 /*
3065 * Convert the timing to bus clock counts.
3066 */
3067
75b1f2f8 3068 ata_timing_quantize(t, t, T, UT);
452503f9
AC
3069
3070 /*
c893a3ae
RD
3071 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
3072 * S.M.A.R.T * and some other commands. We have to ensure that the
3073 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
3074 */
3075
fd3367af 3076 if (speed > XFER_PIO_6) {
452503f9
AC
3077 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
3078 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
3079 }
3080
3081 /*
c893a3ae 3082 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
3083 */
3084
3085 if (t->act8b + t->rec8b < t->cyc8b) {
3086 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
3087 t->rec8b = t->cyc8b - t->act8b;
3088 }
3089
3090 if (t->active + t->recover < t->cycle) {
3091 t->active += (t->cycle - (t->active + t->recover)) / 2;
3092 t->recover = t->cycle - t->active;
3093 }
a617c09f 3094
4f701d1e
AC
3095 /* In a few cases quantisation may produce enough errors to
3096 leave t->cycle too low for the sum of active and recovery
3097 if so we must correct this */
3098 if (t->active + t->recover > t->cycle)
3099 t->cycle = t->active + t->recover;
452503f9
AC
3100
3101 return 0;
3102}
3103
a0f79b92
TH
3104/**
3105 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3106 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3107 * @cycle: cycle duration in ns
3108 *
3109 * Return matching xfer mode for @cycle. The returned mode is of
3110 * the transfer type specified by @xfer_shift. If @cycle is too
3111 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3112 * than the fastest known mode, the fasted mode is returned.
3113 *
3114 * LOCKING:
3115 * None.
3116 *
3117 * RETURNS:
3118 * Matching xfer_mode, 0xff if no match found.
3119 */
3120u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3121{
3122 u8 base_mode = 0xff, last_mode = 0xff;
3123 const struct ata_xfer_ent *ent;
3124 const struct ata_timing *t;
3125
3126 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3127 if (ent->shift == xfer_shift)
3128 base_mode = ent->base;
3129
3130 for (t = ata_timing_find_mode(base_mode);
3131 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3132 unsigned short this_cycle;
3133
3134 switch (xfer_shift) {
3135 case ATA_SHIFT_PIO:
3136 case ATA_SHIFT_MWDMA:
3137 this_cycle = t->cycle;
3138 break;
3139 case ATA_SHIFT_UDMA:
3140 this_cycle = t->udma;
3141 break;
3142 default:
3143 return 0xff;
3144 }
3145
3146 if (cycle > this_cycle)
3147 break;
3148
3149 last_mode = t->mode;
3150 }
3151
3152 return last_mode;
3153}
3154
cf176e1a
TH
3155/**
3156 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 3157 * @dev: Device to adjust xfer masks
458337db 3158 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
3159 *
3160 * Adjust xfer masks of @dev downward. Note that this function
3161 * does not apply the change. Invoking ata_set_mode() afterwards
3162 * will apply the limit.
3163 *
3164 * LOCKING:
3165 * Inherited from caller.
3166 *
3167 * RETURNS:
3168 * 0 on success, negative errno on failure
3169 */
458337db 3170int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 3171{
458337db 3172 char buf[32];
7dc951ae
TH
3173 unsigned long orig_mask, xfer_mask;
3174 unsigned long pio_mask, mwdma_mask, udma_mask;
458337db 3175 int quiet, highbit;
cf176e1a 3176
458337db
TH
3177 quiet = !!(sel & ATA_DNXFER_QUIET);
3178 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 3179
458337db
TH
3180 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
3181 dev->mwdma_mask,
3182 dev->udma_mask);
3183 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 3184
458337db
TH
3185 switch (sel) {
3186 case ATA_DNXFER_PIO:
3187 highbit = fls(pio_mask) - 1;
3188 pio_mask &= ~(1 << highbit);
3189 break;
3190
3191 case ATA_DNXFER_DMA:
3192 if (udma_mask) {
3193 highbit = fls(udma_mask) - 1;
3194 udma_mask &= ~(1 << highbit);
3195 if (!udma_mask)
3196 return -ENOENT;
3197 } else if (mwdma_mask) {
3198 highbit = fls(mwdma_mask) - 1;
3199 mwdma_mask &= ~(1 << highbit);
3200 if (!mwdma_mask)
3201 return -ENOENT;
3202 }
3203 break;
3204
3205 case ATA_DNXFER_40C:
3206 udma_mask &= ATA_UDMA_MASK_40C;
3207 break;
3208
3209 case ATA_DNXFER_FORCE_PIO0:
3210 pio_mask &= 1;
3211 case ATA_DNXFER_FORCE_PIO:
3212 mwdma_mask = 0;
3213 udma_mask = 0;
3214 break;
3215
458337db
TH
3216 default:
3217 BUG();
3218 }
3219
3220 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
3221
3222 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
3223 return -ENOENT;
3224
3225 if (!quiet) {
3226 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
3227 snprintf(buf, sizeof(buf), "%s:%s",
3228 ata_mode_string(xfer_mask),
3229 ata_mode_string(xfer_mask & ATA_MASK_PIO));
3230 else
3231 snprintf(buf, sizeof(buf), "%s",
3232 ata_mode_string(xfer_mask));
3233
3234 ata_dev_printk(dev, KERN_WARNING,
3235 "limiting speed to %s\n", buf);
3236 }
cf176e1a
TH
3237
3238 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
3239 &dev->udma_mask);
3240
cf176e1a 3241 return 0;
cf176e1a
TH
3242}
3243
3373efd8 3244static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 3245{
9af5c9c9 3246 struct ata_eh_context *ehc = &dev->link->eh_context;
4055dee7
TH
3247 const char *dev_err_whine = "";
3248 int ign_dev_err = 0;
83206a29
TH
3249 unsigned int err_mask;
3250 int rc;
1da177e4 3251
e8384607 3252 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
3253 if (dev->xfer_shift == ATA_SHIFT_PIO)
3254 dev->flags |= ATA_DFLAG_PIO;
3255
3373efd8 3256 err_mask = ata_dev_set_xfermode(dev);
2dcb407e 3257
4055dee7
TH
3258 if (err_mask & ~AC_ERR_DEV)
3259 goto fail;
3260
3261 /* revalidate */
3262 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3263 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
3264 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
3265 if (rc)
3266 return rc;
3267
11750a40
AC
3268 /* Old CFA may refuse this command, which is just fine */
3269 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
4055dee7 3270 ign_dev_err = 1;
2dcb407e 3271
0bc2a79a
AC
3272 /* Some very old devices and some bad newer ones fail any kind of
3273 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3274 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3275 dev->pio_mode <= XFER_PIO_2)
4055dee7 3276 ign_dev_err = 1;
2dcb407e 3277
3acaf94b
AC
3278 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3279 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
c5038fc0 3280 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3acaf94b
AC
3281 dev->dma_mode == XFER_MW_DMA_0 &&
3282 (dev->id[63] >> 8) & 1)
4055dee7 3283 ign_dev_err = 1;
3acaf94b 3284
4055dee7
TH
3285 /* if the device is actually configured correctly, ignore dev err */
3286 if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
3287 ign_dev_err = 1;
1da177e4 3288
4055dee7
TH
3289 if (err_mask & AC_ERR_DEV) {
3290 if (!ign_dev_err)
3291 goto fail;
3292 else
3293 dev_err_whine = " (device error ignored)";
3294 }
48a8a14f 3295
23e71c3d
TH
3296 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3297 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 3298
4055dee7
TH
3299 ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
3300 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
3301 dev_err_whine);
3302
83206a29 3303 return 0;
4055dee7
TH
3304
3305 fail:
3306 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3307 "(err_mask=0x%x)\n", err_mask);
3308 return -EIO;
1da177e4
LT
3309}
3310
1da177e4 3311/**
04351821 3312 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3313 * @link: link on which timings will be programmed
1967b7ff 3314 * @r_failed_dev: out parameter for failed device
1da177e4 3315 *
04351821
AC
3316 * Standard implementation of the function used to tune and set
3317 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3318 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3319 * returned in @r_failed_dev.
780a87f7 3320 *
1da177e4 3321 * LOCKING:
0cba632b 3322 * PCI/etc. bus probe sem.
e82cbdb9
TH
3323 *
3324 * RETURNS:
3325 * 0 on success, negative errno otherwise
1da177e4 3326 */
04351821 3327
0260731f 3328int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3329{
0260731f 3330 struct ata_port *ap = link->ap;
e8e0619f 3331 struct ata_device *dev;
f58229f8 3332 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3333
a6d5a51c 3334 /* step 1: calculate xfer_mask */
f58229f8 3335 ata_link_for_each_dev(dev, link) {
7dc951ae 3336 unsigned long pio_mask, dma_mask;
b3a70601 3337 unsigned int mode_mask;
a6d5a51c 3338
e1211e3f 3339 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3340 continue;
3341
b3a70601
AC
3342 mode_mask = ATA_DMA_MASK_ATA;
3343 if (dev->class == ATA_DEV_ATAPI)
3344 mode_mask = ATA_DMA_MASK_ATAPI;
3345 else if (ata_id_is_cfa(dev->id))
3346 mode_mask = ATA_DMA_MASK_CFA;
3347
3373efd8 3348 ata_dev_xfermask(dev);
33267325 3349 ata_force_xfermask(dev);
1da177e4 3350
acf356b1
TH
3351 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3352 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3353
3354 if (libata_dma_mask & mode_mask)
3355 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3356 else
3357 dma_mask = 0;
3358
acf356b1
TH
3359 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3360 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3361
4f65977d 3362 found = 1;
70cd071e 3363 if (dev->dma_mode != 0xff)
5444a6f4 3364 used_dma = 1;
a6d5a51c 3365 }
4f65977d 3366 if (!found)
e82cbdb9 3367 goto out;
a6d5a51c
TH
3368
3369 /* step 2: always set host PIO timings */
f58229f8 3370 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3371 if (!ata_dev_enabled(dev))
3372 continue;
3373
70cd071e 3374 if (dev->pio_mode == 0xff) {
f15a1daf 3375 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3376 rc = -EINVAL;
e82cbdb9 3377 goto out;
e8e0619f
TH
3378 }
3379
3380 dev->xfer_mode = dev->pio_mode;
3381 dev->xfer_shift = ATA_SHIFT_PIO;
3382 if (ap->ops->set_piomode)
3383 ap->ops->set_piomode(ap, dev);
3384 }
1da177e4 3385
a6d5a51c 3386 /* step 3: set host DMA timings */
f58229f8 3387 ata_link_for_each_dev(dev, link) {
70cd071e 3388 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3389 continue;
3390
3391 dev->xfer_mode = dev->dma_mode;
3392 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3393 if (ap->ops->set_dmamode)
3394 ap->ops->set_dmamode(ap, dev);
3395 }
1da177e4
LT
3396
3397 /* step 4: update devices' xfer mode */
f58229f8 3398 ata_link_for_each_dev(dev, link) {
18d90deb 3399 /* don't update suspended devices' xfer mode */
9666f400 3400 if (!ata_dev_enabled(dev))
83206a29
TH
3401 continue;
3402
3373efd8 3403 rc = ata_dev_set_mode(dev);
5bbc53f4 3404 if (rc)
e82cbdb9 3405 goto out;
83206a29 3406 }
1da177e4 3407
e8e0619f
TH
3408 /* Record simplex status. If we selected DMA then the other
3409 * host channels are not permitted to do so.
5444a6f4 3410 */
cca3974e 3411 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3412 ap->host->simplex_claimed = ap;
5444a6f4 3413
e82cbdb9
TH
3414 out:
3415 if (rc)
3416 *r_failed_dev = dev;
3417 return rc;
1da177e4
LT
3418}
3419
1fdffbce
JG
3420/**
3421 * ata_tf_to_host - issue ATA taskfile to host controller
3422 * @ap: port to which command is being issued
3423 * @tf: ATA taskfile register set
3424 *
3425 * Issues ATA taskfile register set to ATA host controller,
3426 * with proper synchronization with interrupt handler and
3427 * other threads.
3428 *
3429 * LOCKING:
cca3974e 3430 * spin_lock_irqsave(host lock)
1fdffbce
JG
3431 */
3432
3433static inline void ata_tf_to_host(struct ata_port *ap,
3434 const struct ata_taskfile *tf)
3435{
3436 ap->ops->tf_load(ap, tf);
3437 ap->ops->exec_command(ap, tf);
3438}
3439
1da177e4
LT
3440/**
3441 * ata_busy_sleep - sleep until BSY clears, or timeout
3442 * @ap: port containing status register to be polled
3443 * @tmout_pat: impatience timeout
3444 * @tmout: overall timeout
3445 *
780a87f7
JG
3446 * Sleep until ATA Status register bit BSY clears,
3447 * or a timeout occurs.
3448 *
d1adc1bb
TH
3449 * LOCKING:
3450 * Kernel thread context (may sleep).
3451 *
3452 * RETURNS:
3453 * 0 on success, -errno otherwise.
1da177e4 3454 */
d1adc1bb
TH
3455int ata_busy_sleep(struct ata_port *ap,
3456 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
3457{
3458 unsigned long timer_start, timeout;
3459 u8 status;
3460
3461 status = ata_busy_wait(ap, ATA_BUSY, 300);
3462 timer_start = jiffies;
3463 timeout = timer_start + tmout_pat;
d1adc1bb
TH
3464 while (status != 0xff && (status & ATA_BUSY) &&
3465 time_before(jiffies, timeout)) {
1da177e4
LT
3466 msleep(50);
3467 status = ata_busy_wait(ap, ATA_BUSY, 3);
3468 }
3469
d1adc1bb 3470 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 3471 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
3472 "port is slow to respond, please be patient "
3473 "(Status 0x%x)\n", status);
1da177e4
LT
3474
3475 timeout = timer_start + tmout;
d1adc1bb
TH
3476 while (status != 0xff && (status & ATA_BUSY) &&
3477 time_before(jiffies, timeout)) {
1da177e4
LT
3478 msleep(50);
3479 status = ata_chk_status(ap);
3480 }
3481
d1adc1bb
TH
3482 if (status == 0xff)
3483 return -ENODEV;
3484
1da177e4 3485 if (status & ATA_BUSY) {
f15a1daf 3486 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3487 "(%lu secs, Status 0x%x)\n",
3488 tmout / HZ, status);
d1adc1bb 3489 return -EBUSY;
1da177e4
LT
3490 }
3491
3492 return 0;
3493}
3494
88ff6eaf
TH
3495/**
3496 * ata_wait_after_reset - wait before checking status after reset
3497 * @ap: port containing status register to be polled
3498 * @deadline: deadline jiffies for the operation
3499 *
3500 * After reset, we need to pause a while before reading status.
3501 * Also, certain combination of controller and device report 0xff
3502 * for some duration (e.g. until SATA PHY is up and running)
3503 * which is interpreted as empty port in ATA world. This
3504 * function also waits for such devices to get out of 0xff
3505 * status.
3506 *
3507 * LOCKING:
3508 * Kernel thread context (may sleep).
3509 */
3510void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
3511{
3512 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
3513
3514 if (time_before(until, deadline))
3515 deadline = until;
3516
3517 /* Spec mandates ">= 2ms" before checking status. We wait
3518 * 150ms, because that was the magic delay used for ATAPI
3519 * devices in Hale Landis's ATADRVR, for the period of time
3520 * between when the ATA command register is written, and then
3521 * status is checked. Because waiting for "a while" before
3522 * checking status is fine, post SRST, we perform this magic
3523 * delay here as well.
3524 *
3525 * Old drivers/ide uses the 2mS rule and then waits for ready.
3526 */
3527 msleep(150);
3528
3529 /* Wait for 0xff to clear. Some SATA devices take a long time
3530 * to clear 0xff after reset. For example, HHD424020F7SV00
3531 * iVDR needs >= 800ms while. Quantum GoVault needs even more
3532 * than that.
1974e201
TH
3533 *
3534 * Note that some PATA controllers (pata_ali) explode if
3535 * status register is read more than once when there's no
3536 * device attached.
88ff6eaf 3537 */
1974e201
TH
3538 if (ap->flags & ATA_FLAG_SATA) {
3539 while (1) {
3540 u8 status = ata_chk_status(ap);
88ff6eaf 3541
1974e201
TH
3542 if (status != 0xff || time_after(jiffies, deadline))
3543 return;
88ff6eaf 3544
1974e201
TH
3545 msleep(50);
3546 }
88ff6eaf
TH
3547 }
3548}
3549
d4b2bab4
TH
3550/**
3551 * ata_wait_ready - sleep until BSY clears, or timeout
3552 * @ap: port containing status register to be polled
3553 * @deadline: deadline jiffies for the operation
3554 *
3555 * Sleep until ATA Status register bit BSY clears, or timeout
3556 * occurs.
3557 *
3558 * LOCKING:
3559 * Kernel thread context (may sleep).
3560 *
3561 * RETURNS:
3562 * 0 on success, -errno otherwise.
3563 */
3564int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3565{
3566 unsigned long start = jiffies;
3567 int warned = 0;
3568
3569 while (1) {
3570 u8 status = ata_chk_status(ap);
3571 unsigned long now = jiffies;
3572
3573 if (!(status & ATA_BUSY))
3574 return 0;
936fd732 3575 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3576 return -ENODEV;
3577 if (time_after(now, deadline))
3578 return -EBUSY;
3579
3580 if (!warned && time_after(now, start + 5 * HZ) &&
3581 (deadline - now > 3 * HZ)) {
3582 ata_port_printk(ap, KERN_WARNING,
3583 "port is slow to respond, please be patient "
3584 "(Status 0x%x)\n", status);
3585 warned = 1;
3586 }
3587
3588 msleep(50);
3589 }
3590}
3591
3592static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3593 unsigned long deadline)
1da177e4
LT
3594{
3595 struct ata_ioports *ioaddr = &ap->ioaddr;
3596 unsigned int dev0 = devmask & (1 << 0);
3597 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3598 int rc, ret = 0;
1da177e4
LT
3599
3600 /* if device 0 was found in ata_devchk, wait for its
3601 * BSY bit to clear
3602 */
d4b2bab4
TH
3603 if (dev0) {
3604 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3605 if (rc) {
3606 if (rc != -ENODEV)
3607 return rc;
3608 ret = rc;
3609 }
d4b2bab4 3610 }
1da177e4 3611
e141d999
TH
3612 /* if device 1 was found in ata_devchk, wait for register
3613 * access briefly, then wait for BSY to clear.
1da177e4 3614 */
e141d999
TH
3615 if (dev1) {
3616 int i;
1da177e4
LT
3617
3618 ap->ops->dev_select(ap, 1);
e141d999
TH
3619
3620 /* Wait for register access. Some ATAPI devices fail
3621 * to set nsect/lbal after reset, so don't waste too
3622 * much time on it. We're gonna wait for !BSY anyway.
3623 */
3624 for (i = 0; i < 2; i++) {
3625 u8 nsect, lbal;
3626
3627 nsect = ioread8(ioaddr->nsect_addr);
3628 lbal = ioread8(ioaddr->lbal_addr);
3629 if ((nsect == 1) && (lbal == 1))
3630 break;
3631 msleep(50); /* give drive a breather */
3632 }
3633
d4b2bab4 3634 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3635 if (rc) {
3636 if (rc != -ENODEV)
3637 return rc;
3638 ret = rc;
3639 }
d4b2bab4 3640 }
1da177e4
LT
3641
3642 /* is all this really necessary? */
3643 ap->ops->dev_select(ap, 0);
3644 if (dev1)
3645 ap->ops->dev_select(ap, 1);
3646 if (dev0)
3647 ap->ops->dev_select(ap, 0);
d4b2bab4 3648
9b89391c 3649 return ret;
1da177e4
LT
3650}
3651
d4b2bab4
TH
3652static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3653 unsigned long deadline)
1da177e4
LT
3654{
3655 struct ata_ioports *ioaddr = &ap->ioaddr;
3656
44877b4e 3657 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3658
3659 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3660 iowrite8(ap->ctl, ioaddr->ctl_addr);
3661 udelay(20); /* FIXME: flush */
3662 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3663 udelay(20); /* FIXME: flush */
3664 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4 3665
88ff6eaf
TH
3666 /* wait a while before checking status */
3667 ata_wait_after_reset(ap, deadline);
1da177e4 3668
2e9edbf8 3669 /* Before we perform post reset processing we want to see if
298a41ca
TH
3670 * the bus shows 0xFF because the odd clown forgets the D7
3671 * pulldown resistor.
3672 */
150981b0 3673 if (ata_chk_status(ap) == 0xFF)
9b89391c 3674 return -ENODEV;
09c7ad79 3675
d4b2bab4 3676 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3677}
3678
3679/**
3680 * ata_bus_reset - reset host port and associated ATA channel
3681 * @ap: port to reset
3682 *
3683 * This is typically the first time we actually start issuing
3684 * commands to the ATA channel. We wait for BSY to clear, then
3685 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3686 * result. Determine what devices, if any, are on the channel
3687 * by looking at the device 0/1 error register. Look at the signature
3688 * stored in each device's taskfile registers, to determine if
3689 * the device is ATA or ATAPI.
3690 *
3691 * LOCKING:
0cba632b 3692 * PCI/etc. bus probe sem.
cca3974e 3693 * Obtains host lock.
1da177e4
LT
3694 *
3695 * SIDE EFFECTS:
198e0fed 3696 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3697 */
3698
3699void ata_bus_reset(struct ata_port *ap)
3700{
9af5c9c9 3701 struct ata_device *device = ap->link.device;
1da177e4
LT
3702 struct ata_ioports *ioaddr = &ap->ioaddr;
3703 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3704 u8 err;
aec5c3c1 3705 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3706 int rc;
1da177e4 3707
44877b4e 3708 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3709
3710 /* determine if device 0/1 are present */
3711 if (ap->flags & ATA_FLAG_SATA_RESET)
3712 dev0 = 1;
3713 else {
3714 dev0 = ata_devchk(ap, 0);
3715 if (slave_possible)
3716 dev1 = ata_devchk(ap, 1);
3717 }
3718
3719 if (dev0)
3720 devmask |= (1 << 0);
3721 if (dev1)
3722 devmask |= (1 << 1);
3723
3724 /* select device 0 again */
3725 ap->ops->dev_select(ap, 0);
3726
3727 /* issue bus reset */
9b89391c
TH
3728 if (ap->flags & ATA_FLAG_SRST) {
3729 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3730 if (rc && rc != -ENODEV)
aec5c3c1 3731 goto err_out;
9b89391c 3732 }
1da177e4
LT
3733
3734 /*
3735 * determine by signature whether we have ATA or ATAPI devices
3736 */
3f19859e 3737 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3738 if ((slave_possible) && (err != 0x81))
3f19859e 3739 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3740
1da177e4 3741 /* is double-select really necessary? */
9af5c9c9 3742 if (device[1].class != ATA_DEV_NONE)
1da177e4 3743 ap->ops->dev_select(ap, 1);
9af5c9c9 3744 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3745 ap->ops->dev_select(ap, 0);
3746
3747 /* if no devices were detected, disable this port */
9af5c9c9
TH
3748 if ((device[0].class == ATA_DEV_NONE) &&
3749 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3750 goto err_out;
3751
3752 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3753 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3754 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3755 }
3756
3757 DPRINTK("EXIT\n");
3758 return;
3759
3760err_out:
f15a1daf 3761 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3762 ata_port_disable(ap);
1da177e4
LT
3763
3764 DPRINTK("EXIT\n");
3765}
3766
d7bb4cc7 3767/**
936fd732
TH
3768 * sata_link_debounce - debounce SATA phy status
3769 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3770 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3771 * @deadline: deadline jiffies for the operation
d7bb4cc7 3772 *
936fd732 3773* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3774 * holding the same value where DET is not 1 for @duration polled
3775 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3776 * beginning of the stable state. Because DET gets stuck at 1 on
3777 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3778 * until timeout then returns 0 if DET is stable at 1.
3779 *
d4b2bab4
TH
3780 * @timeout is further limited by @deadline. The sooner of the
3781 * two is used.
3782 *
d7bb4cc7
TH
3783 * LOCKING:
3784 * Kernel thread context (may sleep)
3785 *
3786 * RETURNS:
3787 * 0 on success, -errno on failure.
3788 */
936fd732
TH
3789int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3790 unsigned long deadline)
7a7921e8 3791{
d7bb4cc7 3792 unsigned long interval_msec = params[0];
d4b2bab4
TH
3793 unsigned long duration = msecs_to_jiffies(params[1]);
3794 unsigned long last_jiffies, t;
d7bb4cc7
TH
3795 u32 last, cur;
3796 int rc;
3797
d4b2bab4
TH
3798 t = jiffies + msecs_to_jiffies(params[2]);
3799 if (time_before(t, deadline))
3800 deadline = t;
3801
936fd732 3802 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3803 return rc;
3804 cur &= 0xf;
3805
3806 last = cur;
3807 last_jiffies = jiffies;
3808
3809 while (1) {
3810 msleep(interval_msec);
936fd732 3811 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3812 return rc;
3813 cur &= 0xf;
3814
3815 /* DET stable? */
3816 if (cur == last) {
d4b2bab4 3817 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3818 continue;
3819 if (time_after(jiffies, last_jiffies + duration))
3820 return 0;
3821 continue;
3822 }
3823
3824 /* unstable, start over */
3825 last = cur;
3826 last_jiffies = jiffies;
3827
f1545154
TH
3828 /* Check deadline. If debouncing failed, return
3829 * -EPIPE to tell upper layer to lower link speed.
3830 */
d4b2bab4 3831 if (time_after(jiffies, deadline))
f1545154 3832 return -EPIPE;
d7bb4cc7
TH
3833 }
3834}
3835
3836/**
936fd732
TH
3837 * sata_link_resume - resume SATA link
3838 * @link: ATA link to resume SATA
d7bb4cc7 3839 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3840 * @deadline: deadline jiffies for the operation
d7bb4cc7 3841 *
936fd732 3842 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3843 *
3844 * LOCKING:
3845 * Kernel thread context (may sleep)
3846 *
3847 * RETURNS:
3848 * 0 on success, -errno on failure.
3849 */
936fd732
TH
3850int sata_link_resume(struct ata_link *link, const unsigned long *params,
3851 unsigned long deadline)
d7bb4cc7
TH
3852{
3853 u32 scontrol;
81952c54
TH
3854 int rc;
3855
936fd732 3856 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3857 return rc;
7a7921e8 3858
852ee16a 3859 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3860
936fd732 3861 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3862 return rc;
7a7921e8 3863
d7bb4cc7
TH
3864 /* Some PHYs react badly if SStatus is pounded immediately
3865 * after resuming. Delay 200ms before debouncing.
3866 */
3867 msleep(200);
7a7921e8 3868
936fd732 3869 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3870}
3871
f5914a46
TH
3872/**
3873 * ata_std_prereset - prepare for reset
cc0680a5 3874 * @link: ATA link to be reset
d4b2bab4 3875 * @deadline: deadline jiffies for the operation
f5914a46 3876 *
cc0680a5 3877 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3878 * prereset makes libata abort whole reset sequence and give up
3879 * that port, so prereset should be best-effort. It does its
3880 * best to prepare for reset sequence but if things go wrong, it
3881 * should just whine, not fail.
f5914a46
TH
3882 *
3883 * LOCKING:
3884 * Kernel thread context (may sleep)
3885 *
3886 * RETURNS:
3887 * 0 on success, -errno otherwise.
3888 */
cc0680a5 3889int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3890{
cc0680a5 3891 struct ata_port *ap = link->ap;
936fd732 3892 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3893 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3894 int rc;
3895
31daabda 3896 /* handle link resume */
28324304 3897 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3898 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3899 ehc->i.action |= ATA_EH_HARDRESET;
3900
633273a3
TH
3901 /* Some PMPs don't work with only SRST, force hardreset if PMP
3902 * is supported.
3903 */
3904 if (ap->flags & ATA_FLAG_PMP)
3905 ehc->i.action |= ATA_EH_HARDRESET;
3906
f5914a46
TH
3907 /* if we're about to do hardreset, nothing more to do */
3908 if (ehc->i.action & ATA_EH_HARDRESET)
3909 return 0;
3910
936fd732 3911 /* if SATA, resume link */
a16abc0b 3912 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3913 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3914 /* whine about phy resume failure but proceed */
3915 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3916 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3917 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3918 }
3919
3920 /* Wait for !BSY if the controller can wait for the first D2H
3921 * Reg FIS and we don't know that no device is attached.
3922 */
0c88758b 3923 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3924 rc = ata_wait_ready(ap, deadline);
6dffaf61 3925 if (rc && rc != -ENODEV) {
cc0680a5 3926 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3927 "(errno=%d), forcing hardreset\n", rc);
3928 ehc->i.action |= ATA_EH_HARDRESET;
3929 }
3930 }
f5914a46
TH
3931
3932 return 0;
3933}
3934
c2bd5804
TH
3935/**
3936 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3937 * @link: ATA link to reset
c2bd5804 3938 * @classes: resulting classes of attached devices
d4b2bab4 3939 * @deadline: deadline jiffies for the operation
c2bd5804 3940 *
52783c5d 3941 * Reset host port using ATA SRST.
c2bd5804
TH
3942 *
3943 * LOCKING:
3944 * Kernel thread context (may sleep)
3945 *
3946 * RETURNS:
3947 * 0 on success, -errno otherwise.
3948 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* nothing attached: report no device and skip the reset entirely */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	/* err == 0x81 appears to mean device 1 failed diagnostics, so
	 * don't try to classify the slave in that case — TODO confirm
	 * against the ATA diagnostic-code table */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3994
3995/**
cc0680a5
TH
3996 * sata_link_hardreset - reset link via SATA phy reset
3997 * @link: link to reset
b6103f6d 3998 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3999 * @deadline: deadline jiffies for the operation
c2bd5804 4000 *
cc0680a5 4001 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
4002 *
4003 * LOCKING:
4004 * Kernel thread context (may sleep)
4005 *
4006 * RETURNS:
4007 * 0 on success, -errno otherwise.
4008 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* keep the SPD nibble, set DET to 4 (phy offline) */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset: DET = 1 requests COMRESET */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
4054
4055/**
4056 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 4057 * @link: link to reset
b6103f6d 4058 * @class: resulting class of attached device
d4b2bab4 4059 * @deadline: deadline jiffies for the operation
b6103f6d
TH
4060 *
4061 * SATA phy-reset host port using DET bits of SControl register,
4062 * wait for !BSY and classify the attached device.
4063 *
4064 * LOCKING:
4065 * Kernel thread context (may sleep)
4066 *
4067 * RETURNS:
4068 * 0 on success, -errno otherwise.
4069 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;	/* EH will retry with softreset */
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
4122
4123/**
4124 * ata_std_postreset - standard postreset callback
cc0680a5 4125 * @link: the target ata_link
c2bd5804
TH
4126 * @classes: classes of attached devices
4127 *
4128 * This function is invoked after a successful reset. Note that
4129 * the device might have been reset more than once using
4130 * different reset methods before postreset is invoked.
c2bd5804 4131 *
c2bd5804
TH
4132 * LOCKING:
4133 * Kernel thread context (may sleep)
4134 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear) and the cached copy in eh_info */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);
	link->eh_info.serror = 0;

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control; only for taskfile-style controllers
	 * that expose a control register */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
4168
623a3128
TH
4169/**
4170 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
4171 * @dev: device to compare against
4172 * @new_class: class of the new device
4173 * @new_id: IDENTIFY page of the new device
4174 *
4175 * Compare @new_class and @new_id against @dev and determine
4176 * whether @dev is the device indicated by @new_class and
4177 * @new_id.
4178 *
4179 * LOCKING:
4180 * None.
4181 *
4182 * RETURNS:
4183 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4184 */
3373efd8
TH
4185static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4186 const u16 *new_id)
623a3128
TH
4187{
4188 const u16 *old_id = dev->id;
a0cf733b
TH
4189 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4190 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4191
4192 if (dev->class != new_class) {
f15a1daf
TH
4193 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4194 dev->class, new_class);
623a3128
TH
4195 return 0;
4196 }
4197
a0cf733b
TH
4198 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4199 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4200 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4201 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4202
4203 if (strcmp(model[0], model[1])) {
f15a1daf
TH
4204 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4205 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
4206 return 0;
4207 }
4208
4209 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
4210 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4211 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
4212 return 0;
4213 }
4214
623a3128
TH
4215 return 1;
4216}
4217
4218/**
fe30911b 4219 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4220 * @dev: target ATA device
bff04647 4221 * @readid_flags: read ID flags
623a3128
TH
4222 *
4223 * Re-read IDENTIFY page and make sure @dev is still attached to
4224 * the port.
4225 *
4226 * LOCKING:
4227 * Kernel thread context (may sleep)
4228 *
4229 * RETURNS:
4230 * 0 on success, negative errno otherwise
4231 */
fe30911b 4232int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4233{
5eb45c02 4234 unsigned int class = dev->class;
9af5c9c9 4235 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4236 int rc;
4237
fe635c7e 4238 /* read ID data */
bff04647 4239 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4240 if (rc)
fe30911b 4241 return rc;
623a3128
TH
4242
4243 /* is the device still there? */
fe30911b
TH
4244 if (!ata_dev_same_device(dev, class, id))
4245 return -ENODEV;
623a3128 4246
fe635c7e 4247 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4248 return 0;
4249}
4250
4251/**
4252 * ata_dev_revalidate - Revalidate ATA device
4253 * @dev: device to revalidate
422c9daa 4254 * @new_class: new class code
fe30911b
TH
4255 * @readid_flags: read ID flags
4256 *
4257 * Re-read IDENTIFY page, make sure @dev is still attached to the
4258 * port and reconfigure it according to the new IDENTIFY page.
4259 *
4260 * LOCKING:
4261 * Kernel thread context (may sleep)
4262 *
4263 * RETURNS:
4264 * 0 on success, negative errno otherwise
4265 */
422c9daa
TH
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	/* remember the old capacity so a silent size change is detected */
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4315
6919a0a6
AC
/* One quirk ("horkage") table entry.  model_num/model_rev are matched
 * against the IDENTIFY strings by strn_pattern_cmp(); a trailing '*'
 * acts as a prefix wildcard, and a NULL model_rev matches any revision.
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },

	/* End Marker */
	{ }
};
2e9edbf8 4402
/* Compare @name against pattern @patt.  A trailing @wildchar in @patt
 * turns the comparison into a prefix match.  Returns 0 on match,
 * non-zero otherwise (strncmp semantics).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild = strchr(patt, wildchar);
	size_t cmp_len;

	if (wild && wild[1] == '\0') {
		/* trailing wildcard: only the prefix before it matters */
		cmp_len = wild - patt;
	} else {
		/* exact match over the length of @name */
		cmp_len = strlen(name);
		if (cmp_len == 0)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, cmp_len);
}
4425
75683fe7 4426static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4427{
8bfa79fc
TH
4428 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4429 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4430 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4431
8bfa79fc
TH
4432 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4433 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4434
6919a0a6 4435 while (ad->model_num) {
539cc7c7 4436 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4437 if (ad->model_rev == NULL)
4438 return ad->horkage;
539cc7c7 4439 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4440 return ad->horkage;
f4b15fef 4441 }
6919a0a6 4442 ad++;
f4b15fef 4443 }
1da177e4
LT
4444 return 0;
4445}
4446
6919a0a6
AC
4447static int ata_dma_blacklisted(const struct ata_device *dev)
4448{
4449 /* We don't support polling DMA.
4450 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4451 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4452 */
9af5c9c9 4453 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4454 (dev->flags & ATA_DFLAG_CDB_INTR))
4455 return 1;
75683fe7 4456 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4457}
4458
6bbfd53d
AC
4459/**
4460 * ata_is_40wire - check drive side detection
4461 * @dev: device
4462 *
4463 * Perform drive side detection decoding, allowing for device vendors
4464 * who can't follow the documentation.
4465 */
4466
4467static int ata_is_40wire(struct ata_device *dev)
4468{
4469 if (dev->horkage & ATA_HORKAGE_IVB)
4470 return ata_drive_40wire_relaxed(dev->id);
4471 return ata_drive_40wire(dev->id);
4472}
4473
a6d5a51c
TH
4474/**
4475 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4476 * @dev: Device to compute xfermask for
4477 *
acf356b1
TH
4478 * Compute supported xfermask of @dev and store it in
4479 * dev->*_mask. This function is responsible for applying all
4480 * known limits including host controller limits, device
4481 * blacklist, etc...
a6d5a51c
TH
4482 *
4483 * LOCKING:
4484 * None.
a6d5a51c 4485 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_is_40wire(dev) &&
		    (ap->cbl == ATA_CBL_PATA_UNK ||
		     ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				 "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	/* publish the final masks back onto the device */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4554
1da177e4
LT
4555/**
4556 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4557 * @dev: Device to which command will be sent
4558 *
780a87f7
JG
4559 * Issue SET FEATURES - XFER MODE command to device @dev
4560 * on port @ap.
4561 *
1da177e4 4562 * LOCKING:
0cba632b 4563 * PCI/etc. bus probe sem.
83206a29
TH
4564 *
4565 * RETURNS:
4566 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4567 */
4568
static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
9f45cbd3 4599/**
218f3d30 4600 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4601 * @dev: Device to which command will be sent
4602 * @enable: Whether to enable or disable the feature
218f3d30 4603 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4604 *
4605 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4606 * on port @ap with sector count
9f45cbd3
KCA
4607 *
4608 * LOCKING:
4609 * PCI/etc. bus probe sem.
4610 *
4611 * RETURNS:
4612 * 0 on success, AC_ERR_* mask otherwise.
4613 */
218f3d30
JG
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile; @enable is the SET FEATURES
	 * subcommand and @feature goes into the sector count register */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4635
8bf62ece
AL
4636/**
4637 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4638 * @dev: Device to which command will be sent
e2a7f77a
RD
4639 * @heads: Number of heads (taskfile parameter)
4640 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4641 *
4642 * LOCKING:
6aff8f1f
TH
4643 * Kernel thread context (may sleep)
4644 *
4645 * RETURNS:
4646 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4647 */
3373efd8
TH
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4678
1da177e4 4679/**
0cba632b
JG
4680 * ata_sg_clean - Unmap DMA memory associated with command
4681 * @qc: Command containing DMA memory to be released
4682 *
4683 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4684 *
4685 * LOCKING:
cca3974e 4686 * spin_lock_irqsave(host lock)
1da177e4 4687 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	/* caller must only invoke this on a mapped command */
	WARN_ON(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* n_elem may be 0 if dma_map_sg() coalesced to nothing */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);

	/* mark the command unmapped and drop the sg reference */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4704
4705/**
4706 * ata_fill_sg - Fill PCI IDE PRD table
4707 * @qc: Metadata associated with taskfile to be transferred
4708 *
780a87f7
JG
4709 * Fill PCI IDE PRD (scatter-gather) table with segments
4710 * associated with the current disk command.
4711 *
1da177e4 4712 * LOCKING:
cca3974e 4713 * spin_lock_irqsave(host lock)
1da177e4
LT
4714 *
4715 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;	/* si: sg index, pi: PRD entry index */

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg element so no PRD crosses a 64K boundary */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			/* len & 0xffff: per spec, 0x0000 encodes 64K */
			ap->prd[pi].addr = cpu_to_le32(addr);
			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last PRD entry as end-of-table */
	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
b9a4197e 4752
d26fc955
AC
4753/**
4754 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4755 * @qc: Metadata associated with taskfile to be transferred
4756 *
4757 * Fill PCI IDE PRD (scatter-gather) table with segments
4758 * associated with the current disk command. Perform the fill
4759 * so that we avoid writing any length 64K records for
4760 * controllers that don't follow the spec.
4761 *
4762 * LOCKING:
4763 * spin_lock_irqsave(host lock)
4764 *
4765 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;	/* si: sg index, pi: PRD entry index */

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says.
				   Emit two 32K PRDs instead; note ++pi here
				   starts the second entry, whose flags_len is
				   filled in below. */
				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last PRD entry as end-of-table */
	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4810
1da177e4
LT
4811/**
4812 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4813 * @qc: Metadata associated with taskfile to check
4814 *
780a87f7
JG
4815 * Allow low-level driver to filter ATA PACKET commands, returning
4816 * a status indicating whether or not it is OK to use DMA for the
4817 * supplied PACKET command.
4818 *
1da177e4 4819 * LOCKING:
cca3974e 4820 * spin_lock_irqsave(host lock)
0cba632b 4821 *
1da177e4
LT
4822 * RETURNS: 0 when ATAPI DMA can be used
4823 * nonzero otherwise
4824 */
4825int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4826{
4827 struct ata_port *ap = qc->ap;
b9a4197e
TH
4828
4829 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4830 * few ATAPI devices choke on such DMA requests.
4831 */
4832 if (unlikely(qc->nbytes & 15))
4833 return 1;
6f23a31d 4834
1da177e4 4835 if (ap->ops->check_atapi_dma)
b9a4197e 4836 return ap->ops->check_atapi_dma(qc);
1da177e4 4837
b9a4197e 4838 return 0;
1da177e4 4839}
b9a4197e 4840
31cc23b3
TH
4841/**
4842 * ata_std_qc_defer - Check whether a qc needs to be deferred
4843 * @qc: ATA command in question
4844 *
4845 * Non-NCQ commands cannot run with any other command, NCQ or
4846 * not. As upper layer only knows the queue depth, we are
4847 * responsible for maintaining exclusion. This function checks
4848 * whether a new command @qc can be issued.
4849 *
4850 * LOCKING:
4851 * spin_lock_irqsave(host lock)
4852 *
4853 * RETURNS:
4854 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4855 */
4856int ata_std_qc_defer(struct ata_queued_cmd *qc)
4857{
4858 struct ata_link *link = qc->dev->link;
4859
4860 if (qc->tf.protocol == ATA_PROT_NCQ) {
4861 if (!ata_tag_valid(link->active_tag))
4862 return 0;
4863 } else {
4864 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4865 return 0;
4866 }
4867
4868 return ATA_DEFER_LINK;
4869}
4870
1da177e4
LT
4871/**
4872 * ata_qc_prep - Prepare taskfile for submission
4873 * @qc: Metadata associated with taskfile to be prepared
4874 *
780a87f7
JG
4875 * Prepare ATA taskfile for submission.
4876 *
1da177e4 4877 * LOCKING:
cca3974e 4878 * spin_lock_irqsave(host lock)
1da177e4
LT
4879 */
4880void ata_qc_prep(struct ata_queued_cmd *qc)
4881{
4882 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4883 return;
4884
4885 ata_fill_sg(qc);
4886}
4887
d26fc955
AC
4888/**
4889 * ata_dumb_qc_prep - Prepare taskfile for submission
4890 * @qc: Metadata associated with taskfile to be prepared
4891 *
4892 * Prepare ATA taskfile for submission.
4893 *
4894 * LOCKING:
4895 * spin_lock_irqsave(host lock)
4896 */
4897void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4898{
4899 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4900 return;
4901
4902 ata_fill_sg_dumb(qc);
4903}
4904
e46834cd
BK
/* ->qc_prep stub for drivers that need no per-command preparation. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
4906
0cba632b
JG
4907/**
4908 * ata_sg_init - Associate command with scatter-gather table.
4909 * @qc: Command to be associated
4910 * @sg: Scatter-gather table.
4911 * @n_elem: Number of elements in s/g table.
4912 *
4913 * Initialize the data-related elements of queued_cmd @qc
4914 * to point to a scatter-gather table @sg, containing @n_elem
4915 * elements.
4916 *
4917 * LOCKING:
cca3974e 4918 * spin_lock_irqsave(host lock)
0cba632b 4919 */
1da177e4
LT
4920void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4921 unsigned int n_elem)
4922{
ff2aeb1e 4923 qc->sg = sg;
1da177e4 4924 qc->n_elem = n_elem;
ff2aeb1e 4925 qc->cursg = qc->sg;
1da177e4
LT
4926}
4927
ff2aeb1e
TH
4928/**
4929 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4930 * @qc: Command with scatter-gather table to be mapped.
4931 *
4932 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4933 *
4934 * LOCKING:
4935 * spin_lock_irqsave(host lock)
4936 *
4937 * RETURNS:
4938 * Zero on success, negative on error.
4939 *
4940 */
4941static int ata_sg_setup(struct ata_queued_cmd *qc)
4942{
4943 struct ata_port *ap = qc->ap;
dde20207 4944 unsigned int n_elem;
ff2aeb1e
TH
4945
4946 VPRINTK("ENTER, ata%u\n", ap->print_id);
4947
dde20207
JB
4948 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4949 if (n_elem < 1)
4950 return -1;
ff2aeb1e 4951
dde20207 4952 DPRINTK("%d sg elements mapped\n", n_elem);
1da177e4 4953
dde20207 4954 qc->n_elem = n_elem;
f92a2636 4955 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4
LT
4956
4957 return 0;
4958}
4959
0baab86b 4960/**
c893a3ae 4961 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4962 * @buf: Buffer to swap
4963 * @buf_words: Number of 16-bit words in buffer.
4964 *
4965 * Swap halves of 16-bit words if needed to convert from
4966 * little-endian byte order to native cpu byte order, or
4967 * vice-versa.
4968 *
4969 * LOCKING:
6f0ef4fa 4970 * Inherited from caller.
0baab86b 4971 */
1da177e4
LT
4972void swap_buf_le16(u16 *buf, unsigned int buf_words)
4973{
4974#ifdef __BIG_ENDIAN
4975 unsigned int i;
4976
4977 for (i = 0; i < buf_words; i++)
4978 buf[i] = le16_to_cpu(buf[i]);
4979#endif /* __BIG_ENDIAN */
4980}
4981
6ae4cfb5 4982/**
0d5ff566 4983 * ata_data_xfer - Transfer data by PIO
55dba312 4984 * @dev: device to target
6ae4cfb5
AL
4985 * @buf: data buffer
4986 * @buflen: buffer length
0affa456 4987 * @rw: read/write
6ae4cfb5
AL
4988 *
4989 * Transfer data from/to the device data register by PIO.
4990 *
4991 * LOCKING:
4992 * Inherited from caller.
55dba312
TH
4993 *
4994 * RETURNS:
4995 * Bytes consumed.
6ae4cfb5 4996 */
55dba312
TH
4997unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
4998 unsigned int buflen, int rw)
1da177e4 4999{
55dba312
TH
5000 struct ata_port *ap = dev->link->ap;
5001 void __iomem *data_addr = ap->ioaddr.data_addr;
6ae4cfb5 5002 unsigned int words = buflen >> 1;
1da177e4 5003
6ae4cfb5 5004 /* Transfer multiple of 2 bytes */
55dba312
TH
5005 if (rw == READ)
5006 ioread16_rep(data_addr, buf, words);
1da177e4 5007 else
55dba312 5008 iowrite16_rep(data_addr, buf, words);
6ae4cfb5
AL
5009
5010 /* Transfer trailing 1 byte, if any. */
5011 if (unlikely(buflen & 0x01)) {
4ca4e439 5012 __le16 align_buf[1] = { 0 };
6ae4cfb5
AL
5013 unsigned char *trailing_buf = buf + buflen - 1;
5014
55dba312
TH
5015 if (rw == READ) {
5016 align_buf[0] = cpu_to_le16(ioread16(data_addr));
6ae4cfb5 5017 memcpy(trailing_buf, align_buf, 1);
55dba312
TH
5018 } else {
5019 memcpy(align_buf, trailing_buf, 1);
5020 iowrite16(le16_to_cpu(align_buf[0]), data_addr);
6ae4cfb5 5021 }
55dba312 5022 words++;
6ae4cfb5 5023 }
55dba312
TH
5024
5025 return words << 1;
1da177e4
LT
5026}
5027
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@dev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@rw: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Bytes consumed.
 */
unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
				 unsigned int buflen, int rw)
{
	unsigned long irq_flags;
	unsigned int done;

	local_irq_save(irq_flags);
	done = ata_data_xfer(dev, buf, buflen, rw);
	local_irq_restore(irq_flags);

	return done;
}
5056
5057
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device
 *	by PIO, advancing the command's scatterlist cursor.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command: next HSM state is HSM_ST_LAST */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page is directly addressable; no mapping needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	/* advance cursors; step to the next sg entry when this one is done */
	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5113
07f6f7d0 5114/**
5a5dbd18 5115 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5116 * @qc: Command on going
5117 *
5a5dbd18 5118 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5119 * ATA device for the DRQ request.
5120 *
5121 * LOCKING:
5122 * Inherited from caller.
5123 */
1da177e4 5124
07f6f7d0
AL
5125static void ata_pio_sectors(struct ata_queued_cmd *qc)
5126{
5127 if (is_multi_taskfile(&qc->tf)) {
5128 /* READ/WRITE MULTIPLE */
5129 unsigned int nsect;
5130
587005de 5131 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5132
5a5dbd18 5133 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5134 qc->dev->multi_count);
07f6f7d0
AL
5135 while (nsect--)
5136 ata_pio_sector(qc);
5137 } else
5138 ata_pio_sector(qc);
4cc980b3
AL
5139
5140 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5141}
5142
c71c1857
AL
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB, then set the
 *	next HSM state according to the ATAPI protocol variant
 *	(PIO, NODATA or DMA).
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);	/* ATAPI CDBs are at least 12 bytes */

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATAPI_PROT_PIO:
		/* data moves by PIO in subsequent DRQ phases */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATAPI_PROT_NODATA:
		/* no data phase; next interrupt completes the command */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATAPI_PROT_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5178
6ae4cfb5
AL
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the command's
 *	scatterlist one page-bounded chunk at a time.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -1 if the device requested more data than the
 *	scatterlist can hold (overrun).
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count, consumed;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		/* device wants to move more data than the sg table
		 * describes -- record for EH and bail out
		 */
		ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
				  "buf=%u cur=%u bytes=%u",
				  qc->nbytes, qc->curbytes, bytes);
		return -1;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
	}

	/* consumed may exceed count (odd-byte padding); never underflow bytes */
	bytes -= min(bytes, consumed);
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* sg entry exhausted: advance to the next one */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	/* consumed can be larger than count only for the last transfer */
	WARN_ON(qc->cursg && count != consumed);

	if (bytes)
		goto next_sg;
	return 0;
}
5258
6ae4cfb5
AL
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the interrupt reason and byte count the device reports,
 *	validate them against the expected direction, then transfer
 *	that many bytes from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	struct ata_eh_info *ehi = &dev->link->eh_info;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count is split across the LBA mid/high registers */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (unlikely(ireason & (1 << 0)))
		goto atapi_check;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (unlikely(do_write != i_write))
		goto atapi_check;

	/* a zero byte count in a data phase is a protocol violation */
	if (unlikely(!bytes))
		goto atapi_check;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (unlikely(__atapi_pio_bytes(qc, bytes)))
		goto err_out;
	ata_altstatus(ap); /* flush */

	return;

 atapi_check:
	ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
			  ireason, bytes);
 err_out:
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5316
5317/**
c234fb00
AL
5318 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5319 * @ap: the target ata_port
5320 * @qc: qc on going
1da177e4 5321 *
c234fb00
AL
5322 * RETURNS:
5323 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5324 */
c234fb00
AL
5325
5326static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5327{
c234fb00
AL
5328 if (qc->tf.flags & ATA_TFLAG_POLLING)
5329 return 1;
1da177e4 5330
c234fb00
AL
5331 if (ap->hsm_task_state == HSM_ST_FIRST) {
5332 if (qc->tf.protocol == ATA_PROT_PIO &&
5333 (qc->tf.flags & ATA_TFLAG_WRITE))
5334 return 1;
1da177e4 5335
405e66b3 5336 if (ata_is_atapi(qc->tf.protocol) &&
c234fb00
AL
5337 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5338 return 1;
fe79e683
AL
5339 }
5340
c234fb00
AL
5341 return 0;
5342}
1da177e4 5343
c17ea20d
TH
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  On an HSM
 *	violation (AC_ERR_HSM) under new EH, the port is frozen
 *	instead of completing the qc.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable irqs before completing */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old EH: no freeze support, just complete */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5393
bb5cb290
AL
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Advance the host state machine one or more states based on the
 *	device status register.  Internal transitions loop via
 *	fsm_start; terminal states complete the qc.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATAPI_PROT_PIO) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5635
/**
 *	ata_pio_task - workqueue handler for the polling HSM
 *	@work: work struct embedded in the ata_port (port_task.work)
 *
 *	Poll the device status and advance the host state machine.
 *	If the device stays busy, re-queue itself after a short pause
 *	instead of spinning.
 *
 *	LOCKING:
 *	None on entry; ata_hsm_move() may take the host lock internally
 *	(in_wq == 1 paths).
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5673
1da177e4
LT
5674/**
5675 * ata_qc_new - Request an available ATA command, for queueing
5676 * @ap: Port associated with device @dev
5677 * @dev: Device from whom we request an available command structure
5678 *
5679 * LOCKING:
0cba632b 5680 * None.
1da177e4
LT
5681 */
5682
5683static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5684{
5685 struct ata_queued_cmd *qc = NULL;
5686 unsigned int i;
5687
e3180499 5688 /* no command while frozen */
b51e9e5d 5689 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5690 return NULL;
5691
2ab7db1f
TH
5692 /* the last tag is reserved for internal command. */
5693 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5694 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5695 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5696 break;
5697 }
5698
5699 if (qc)
5700 qc->tag = i;
5701
5702 return qc;
5703}
5704
5705/**
5706 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5707 * @dev: Device from whom we request an available command structure
5708 *
5709 * LOCKING:
0cba632b 5710 * None.
1da177e4
LT
5711 */
5712
3373efd8 5713struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5714{
9af5c9c9 5715 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5716 struct ata_queued_cmd *qc;
5717
5718 qc = ata_qc_new(ap);
5719 if (qc) {
1da177e4
LT
5720 qc->scsicmd = NULL;
5721 qc->ap = ap;
5722 qc->dev = dev;
1da177e4 5723
2c13b7ce 5724 ata_qc_reinit(qc);
1da177e4
LT
5725 }
5726
5727 return qc;
5728}
5729
1da177e4
LT
5730/**
5731 * ata_qc_free - free unused ata_queued_cmd
5732 * @qc: Command to complete
5733 *
5734 * Designed to free unused ata_queued_cmd object
5735 * in case something prevents using it.
5736 *
5737 * LOCKING:
cca3974e 5738 * spin_lock_irqsave(host lock)
1da177e4
LT
5739 */
5740void ata_qc_free(struct ata_queued_cmd *qc)
5741{
4ba946e9
TH
5742 struct ata_port *ap = qc->ap;
5743 unsigned int tag;
5744
a4631474 5745 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5746
4ba946e9
TH
5747 qc->flags = 0;
5748 tag = qc->tag;
5749 if (likely(ata_tag_valid(tag))) {
4ba946e9 5750 qc->tag = ATA_TAG_POISON;
6cec4a39 5751 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5752 }
1da177e4
LT
5753}
5754
/**
 *	__ata_qc_complete - low-level qc completion
 *	@qc: Command to complete
 *
 *	Unmap DMA if mapped, clear the per-link/per-port active-command
 *	bookkeeping and invoke the qc's completion callback.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* last NCQ command on this link gone -> link no longer active */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5791
39599a53
TH
5792static void fill_result_tf(struct ata_queued_cmd *qc)
5793{
5794 struct ata_port *ap = qc->ap;
5795
39599a53 5796 qc->result_tf.flags = qc->tf.flags;
4742d54f 5797 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5798}
5799
00115e0f
TH
5800static void ata_verify_xfer(struct ata_queued_cmd *qc)
5801{
5802 struct ata_device *dev = qc->dev;
5803
5804 if (ata_tag_internal(qc->tag))
5805 return;
5806
5807 if (ata_is_nodata(qc->tf.protocol))
5808 return;
5809
5810 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5811 return;
5812
5813 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5814}
5815
f686bcb8
TH
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *	Failed qcs under new EH are handed to the error handler
 *	instead of being completed here.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5902
dedaf2b0
TH
5903/**
5904 * ata_qc_complete_multiple - Complete multiple qcs successfully
5905 * @ap: port in question
5906 * @qc_active: new qc_active mask
5907 * @finish_qc: LLDD callback invoked before completing a qc
5908 *
5909 * Complete in-flight commands. This functions is meant to be
5910 * called from low-level driver's interrupt routine to complete
5911 * requests normally. ap->qc_active and @qc_active is compared
5912 * and commands are completed accordingly.
5913 *
5914 * LOCKING:
cca3974e 5915 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5916 *
5917 * RETURNS:
5918 * Number of completed commands on success, -errno otherwise.
5919 */
5920int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5921 void (*finish_qc)(struct ata_queued_cmd *))
5922{
5923 int nr_done = 0;
5924 u32 done_mask;
5925 int i;
5926
5927 done_mask = ap->qc_active ^ qc_active;
5928
5929 if (unlikely(done_mask & qc_active)) {
5930 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5931 "(%08x->%08x)\n", ap->qc_active, qc_active);
5932 return -EINVAL;
5933 }
5934
5935 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5936 struct ata_queued_cmd *qc;
5937
5938 if (!(done_mask & (1 << i)))
5939 continue;
5940
5941 if ((qc = ata_qc_from_tag(ap, i))) {
5942 if (finish_qc)
5943 finish_qc(qc);
5944 ata_qc_complete(qc);
5945 nr_done++;
5946 }
5947 }
5948
5949 return nr_done;
5950}
5951
1da177e4
LT
5952/**
5953 * ata_qc_issue - issue taskfile to device
5954 * @qc: command to issue to device
5955 *
5956 * Prepare an ATA command to submission to device.
5957 * This includes mapping the data into a DMA-able
5958 * area, filling in the S/G table, and finally
5959 * writing the taskfile to hardware, starting the command.
5960 *
5961 * LOCKING:
cca3974e 5962 * spin_lock_irqsave(host lock)
1da177e4 5963 */
8e0e694a 5964void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5965{
5966 struct ata_port *ap = qc->ap;
9af5c9c9 5967 struct ata_link *link = qc->dev->link;
405e66b3 5968 u8 prot = qc->tf.protocol;
1da177e4 5969
dedaf2b0
TH
5970 /* Make sure only one non-NCQ command is outstanding. The
5971 * check is skipped for old EH because it reuses active qc to
5972 * request ATAPI sense.
5973 */
9af5c9c9 5974 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0 5975
1973a023 5976 if (ata_is_ncq(prot)) {
9af5c9c9 5977 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
5978
5979 if (!link->sactive)
5980 ap->nr_active_links++;
9af5c9c9 5981 link->sactive |= 1 << qc->tag;
dedaf2b0 5982 } else {
9af5c9c9 5983 WARN_ON(link->sactive);
da917d69
TH
5984
5985 ap->nr_active_links++;
9af5c9c9 5986 link->active_tag = qc->tag;
dedaf2b0
TH
5987 }
5988
e4a70e76 5989 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5990 ap->qc_active |= 1 << qc->tag;
e4a70e76 5991
f92a2636
TH
5992 /* We guarantee to LLDs that they will have at least one
5993 * non-zero sg if the command is a data command.
5994 */
ff2aeb1e 5995 BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));
f92a2636 5996
405e66b3 5997 if (ata_is_dma(prot) || (ata_is_pio(prot) &&
f92a2636 5998 (ap->flags & ATA_FLAG_PIO_DMA)))
001102d7
TH
5999 if (ata_sg_setup(qc))
6000 goto sg_err;
1da177e4 6001
054a5fba
TH
6002 /* if device is sleeping, schedule softreset and abort the link */
6003 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
6004 link->eh_info.action |= ATA_EH_SOFTRESET;
6005 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6006 ata_link_abort(link);
6007 return;
6008 }
6009
1da177e4
LT
6010 ap->ops->qc_prep(qc);
6011
8e0e694a
TH
6012 qc->err_mask |= ap->ops->qc_issue(qc);
6013 if (unlikely(qc->err_mask))
6014 goto err;
6015 return;
1da177e4 6016
8e436af9 6017sg_err:
8e0e694a
TH
6018 qc->err_mask |= AC_ERR_SYSTEM;
6019err:
6020 ata_qc_complete(qc);
1da177e4
LT
6021}
6022
6023/**
6024 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6025 * @qc: command to issue to device
6026 *
6027 * Using various libata functions and hooks, this function
6028 * starts an ATA command. ATA commands are grouped into
6029 * classes called "protocols", and issuing each type of protocol
6030 * is slightly different.
6031 *
0baab86b
EF
6032 * May be used as the qc_issue() entry in ata_port_operations.
6033 *
1da177e4 6034 * LOCKING:
cca3974e 6035 * spin_lock_irqsave(host lock)
1da177e4
LT
6036 *
6037 * RETURNS:
9a3d9eb0 6038 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
6039 */
6040
9a3d9eb0 6041unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
6042{
6043 struct ata_port *ap = qc->ap;
6044
e50362ec
AL
6045 /* Use polling pio if the LLD doesn't handle
6046 * interrupt driven pio and atapi CDB interrupt.
6047 */
6048 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6049 switch (qc->tf.protocol) {
6050 case ATA_PROT_PIO:
e3472cbe 6051 case ATA_PROT_NODATA:
0dc36888
TH
6052 case ATAPI_PROT_PIO:
6053 case ATAPI_PROT_NODATA:
e50362ec
AL
6054 qc->tf.flags |= ATA_TFLAG_POLLING;
6055 break;
0dc36888 6056 case ATAPI_PROT_DMA:
e50362ec 6057 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 6058 /* see ata_dma_blacklisted() */
e50362ec
AL
6059 BUG();
6060 break;
6061 default:
6062 break;
6063 }
6064 }
6065
312f7da2 6066 /* select the device */
1da177e4
LT
6067 ata_dev_select(ap, qc->dev->devno, 1, 0);
6068
312f7da2 6069 /* start the command */
1da177e4
LT
6070 switch (qc->tf.protocol) {
6071 case ATA_PROT_NODATA:
312f7da2
AL
6072 if (qc->tf.flags & ATA_TFLAG_POLLING)
6073 ata_qc_set_polling(qc);
6074
e5338254 6075 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
6076 ap->hsm_task_state = HSM_ST_LAST;
6077
6078 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6079 ata_pio_queue_task(ap, qc, 0);
312f7da2 6080
1da177e4
LT
6081 break;
6082
6083 case ATA_PROT_DMA:
587005de 6084 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6085
1da177e4
LT
6086 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6087 ap->ops->bmdma_setup(qc); /* set up bmdma */
6088 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 6089 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
6090 break;
6091
312f7da2
AL
6092 case ATA_PROT_PIO:
6093 if (qc->tf.flags & ATA_TFLAG_POLLING)
6094 ata_qc_set_polling(qc);
1da177e4 6095
e5338254 6096 ata_tf_to_host(ap, &qc->tf);
312f7da2 6097
54f00389
AL
6098 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6099 /* PIO data out protocol */
6100 ap->hsm_task_state = HSM_ST_FIRST;
442eacc3 6101 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6102
6103 /* always send first data block using
e27486db 6104 * the ata_pio_task() codepath.
54f00389 6105 */
312f7da2 6106 } else {
54f00389
AL
6107 /* PIO data in protocol */
6108 ap->hsm_task_state = HSM_ST;
6109
6110 if (qc->tf.flags & ATA_TFLAG_POLLING)
442eacc3 6111 ata_pio_queue_task(ap, qc, 0);
54f00389
AL
6112
6113 /* if polling, ata_pio_task() handles the rest.
6114 * otherwise, interrupt handler takes over from here.
6115 */
312f7da2
AL
6116 }
6117
1da177e4
LT
6118 break;
6119
0dc36888
TH
6120 case ATAPI_PROT_PIO:
6121 case ATAPI_PROT_NODATA:
312f7da2
AL
6122 if (qc->tf.flags & ATA_TFLAG_POLLING)
6123 ata_qc_set_polling(qc);
6124
e5338254 6125 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 6126
312f7da2
AL
6127 ap->hsm_task_state = HSM_ST_FIRST;
6128
6129 /* send cdb by polling if no cdb interrupt */
6130 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6131 (qc->tf.flags & ATA_TFLAG_POLLING))
442eacc3 6132 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6133 break;
6134
0dc36888 6135 case ATAPI_PROT_DMA:
587005de 6136 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6137
1da177e4
LT
6138 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6139 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
6140 ap->hsm_task_state = HSM_ST_FIRST;
6141
6142 /* send cdb by polling if no cdb interrupt */
6143 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
442eacc3 6144 ata_pio_queue_task(ap, qc, 0);
1da177e4
LT
6145 break;
6146
6147 default:
6148 WARN_ON(1);
9a3d9eb0 6149 return AC_ERR_SYSTEM;
1da177e4
LT
6150 }
6151
6152 return 0;
6153}
6154
1da177e4
LT
6155/**
6156 * ata_host_intr - Handle host interrupt for given (port, task)
6157 * @ap: Port on which interrupt arrived (possibly...)
6158 * @qc: Taskfile currently active in engine
6159 *
6160 * Handle host interrupt for given queued command. Currently,
6161 * only DMA interrupts are handled. All other commands are
6162 * handled via polling with interrupts disabled (nIEN bit).
6163 *
6164 * LOCKING:
cca3974e 6165 * spin_lock_irqsave(host lock)
1da177e4
LT
6166 *
6167 * RETURNS:
6168 * One if interrupt was handled, zero if not (shared irq).
6169 */
6170
2dcb407e
JG
6171inline unsigned int ata_host_intr(struct ata_port *ap,
6172 struct ata_queued_cmd *qc)
1da177e4 6173{
9af5c9c9 6174 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 6175 u8 status, host_stat = 0;
1da177e4 6176
312f7da2 6177 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 6178 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 6179
312f7da2
AL
6180 /* Check whether we are expecting interrupt in this state */
6181 switch (ap->hsm_task_state) {
6182 case HSM_ST_FIRST:
6912ccd5
AL
6183 /* Some pre-ATAPI-4 devices assert INTRQ
6184 * at this state when ready to receive CDB.
6185 */
1da177e4 6186
312f7da2 6187 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
405e66b3
TH
6188 * The flag was turned on only for atapi devices. No
6189 * need to check ata_is_atapi(qc->tf.protocol) again.
312f7da2
AL
6190 */
6191 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 6192 goto idle_irq;
1da177e4 6193 break;
312f7da2
AL
6194 case HSM_ST_LAST:
6195 if (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6196 qc->tf.protocol == ATAPI_PROT_DMA) {
312f7da2
AL
6197 /* check status of DMA engine */
6198 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
6199 VPRINTK("ata%u: host_stat 0x%X\n",
6200 ap->print_id, host_stat);
312f7da2
AL
6201
6202 /* if it's not our irq... */
6203 if (!(host_stat & ATA_DMA_INTR))
6204 goto idle_irq;
6205
6206 /* before we do anything else, clear DMA-Start bit */
6207 ap->ops->bmdma_stop(qc);
a4f16610
AL
6208
6209 if (unlikely(host_stat & ATA_DMA_ERR)) {
6210 /* error when transfering data to/from memory */
6211 qc->err_mask |= AC_ERR_HOST_BUS;
6212 ap->hsm_task_state = HSM_ST_ERR;
6213 }
312f7da2
AL
6214 }
6215 break;
6216 case HSM_ST:
6217 break;
1da177e4
LT
6218 default:
6219 goto idle_irq;
6220 }
6221
312f7da2
AL
6222 /* check altstatus */
6223 status = ata_altstatus(ap);
6224 if (status & ATA_BUSY)
6225 goto idle_irq;
1da177e4 6226
312f7da2
AL
6227 /* check main status, clearing INTRQ */
6228 status = ata_chk_status(ap);
6229 if (unlikely(status & ATA_BUSY))
6230 goto idle_irq;
1da177e4 6231
312f7da2
AL
6232 /* ack bmdma irq events */
6233 ap->ops->irq_clear(ap);
1da177e4 6234
bb5cb290 6235 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
6236
6237 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
0dc36888 6238 qc->tf.protocol == ATAPI_PROT_DMA))
ea54763f
TH
6239 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6240
1da177e4
LT
6241 return 1; /* irq handled */
6242
6243idle_irq:
6244 ap->stats.idle_irq++;
6245
6246#ifdef ATA_IRQ_TRAP
6247 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
6248 ata_chk_status(ap);
6249 ap->ops->irq_clear(ap);
f15a1daf 6250 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 6251 return 1;
1da177e4
LT
6252 }
6253#endif
6254 return 0; /* irq not handled */
6255}
6256
6257/**
6258 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6259 * @irq: irq line (unused)
cca3974e 6260 * @dev_instance: pointer to our ata_host information structure
1da177e4 6261 *
0cba632b
JG
6262 * Default interrupt handler for PCI IDE devices. Calls
6263 * ata_host_intr() for each port that is not disabled.
6264 *
1da177e4 6265 * LOCKING:
cca3974e 6266 * Obtains host lock during operation.
1da177e4
LT
6267 *
6268 * RETURNS:
0cba632b 6269 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6270 */
6271
2dcb407e 6272irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6273{
cca3974e 6274 struct ata_host *host = dev_instance;
1da177e4
LT
6275 unsigned int i;
6276 unsigned int handled = 0;
6277 unsigned long flags;
6278
6279 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6280 spin_lock_irqsave(&host->lock, flags);
1da177e4 6281
cca3974e 6282 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6283 struct ata_port *ap;
6284
cca3974e 6285 ap = host->ports[i];
c1389503 6286 if (ap &&
029f5468 6287 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6288 struct ata_queued_cmd *qc;
6289
9af5c9c9 6290 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6291 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6292 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6293 handled |= ata_host_intr(ap, qc);
6294 }
6295 }
6296
cca3974e 6297 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6298
6299 return IRQ_RETVAL(handled);
6300}
6301
34bf2170
TH
6302/**
6303 * sata_scr_valid - test whether SCRs are accessible
936fd732 6304 * @link: ATA link to test SCR accessibility for
34bf2170 6305 *
936fd732 6306 * Test whether SCRs are accessible for @link.
34bf2170
TH
6307 *
6308 * LOCKING:
6309 * None.
6310 *
6311 * RETURNS:
6312 * 1 if SCRs are accessible, 0 otherwise.
6313 */
936fd732 6314int sata_scr_valid(struct ata_link *link)
34bf2170 6315{
936fd732
TH
6316 struct ata_port *ap = link->ap;
6317
a16abc0b 6318 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6319}
6320
6321/**
6322 * sata_scr_read - read SCR register of the specified port
936fd732 6323 * @link: ATA link to read SCR for
34bf2170
TH
6324 * @reg: SCR to read
6325 * @val: Place to store read value
6326 *
936fd732 6327 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6328 * guaranteed to succeed if @link is ap->link, the cable type of
6329 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6330 *
6331 * LOCKING:
633273a3 6332 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6333 *
6334 * RETURNS:
6335 * 0 on success, negative errno on failure.
6336 */
936fd732 6337int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6338{
633273a3
TH
6339 if (ata_is_host_link(link)) {
6340 struct ata_port *ap = link->ap;
936fd732 6341
633273a3
TH
6342 if (sata_scr_valid(link))
6343 return ap->ops->scr_read(ap, reg, val);
6344 return -EOPNOTSUPP;
6345 }
6346
6347 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6348}
6349
6350/**
6351 * sata_scr_write - write SCR register of the specified port
936fd732 6352 * @link: ATA link to write SCR for
34bf2170
TH
6353 * @reg: SCR to write
6354 * @val: value to write
6355 *
936fd732 6356 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6357 * guaranteed to succeed if @link is ap->link, the cable type of
6358 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6359 *
6360 * LOCKING:
633273a3 6361 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6362 *
6363 * RETURNS:
6364 * 0 on success, negative errno on failure.
6365 */
936fd732 6366int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6367{
633273a3
TH
6368 if (ata_is_host_link(link)) {
6369 struct ata_port *ap = link->ap;
6370
6371 if (sata_scr_valid(link))
6372 return ap->ops->scr_write(ap, reg, val);
6373 return -EOPNOTSUPP;
6374 }
936fd732 6375
633273a3 6376 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6377}
6378
6379/**
6380 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6381 * @link: ATA link to write SCR for
34bf2170
TH
6382 * @reg: SCR to write
6383 * @val: value to write
6384 *
6385 * This function is identical to sata_scr_write() except that this
6386 * function performs flush after writing to the register.
6387 *
6388 * LOCKING:
633273a3 6389 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6390 *
6391 * RETURNS:
6392 * 0 on success, negative errno on failure.
6393 */
936fd732 6394int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6395{
633273a3
TH
6396 if (ata_is_host_link(link)) {
6397 struct ata_port *ap = link->ap;
6398 int rc;
da3dbb17 6399
633273a3
TH
6400 if (sata_scr_valid(link)) {
6401 rc = ap->ops->scr_write(ap, reg, val);
6402 if (rc == 0)
6403 rc = ap->ops->scr_read(ap, reg, &val);
6404 return rc;
6405 }
6406 return -EOPNOTSUPP;
34bf2170 6407 }
633273a3
TH
6408
6409 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6410}
6411
6412/**
936fd732
TH
6413 * ata_link_online - test whether the given link is online
6414 * @link: ATA link to test
34bf2170 6415 *
936fd732
TH
6416 * Test whether @link is online. Note that this function returns
6417 * 0 if online status of @link cannot be obtained, so
6418 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6419 *
6420 * LOCKING:
6421 * None.
6422 *
6423 * RETURNS:
6424 * 1 if the port online status is available and online.
6425 */
936fd732 6426int ata_link_online(struct ata_link *link)
34bf2170
TH
6427{
6428 u32 sstatus;
6429
936fd732
TH
6430 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6431 (sstatus & 0xf) == 0x3)
34bf2170
TH
6432 return 1;
6433 return 0;
6434}
6435
6436/**
936fd732
TH
6437 * ata_link_offline - test whether the given link is offline
6438 * @link: ATA link to test
34bf2170 6439 *
936fd732
TH
6440 * Test whether @link is offline. Note that this function
6441 * returns 0 if offline status of @link cannot be obtained, so
6442 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6443 *
6444 * LOCKING:
6445 * None.
6446 *
6447 * RETURNS:
6448 * 1 if the port offline status is available and offline.
6449 */
936fd732 6450int ata_link_offline(struct ata_link *link)
34bf2170
TH
6451{
6452 u32 sstatus;
6453
936fd732
TH
6454 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6455 (sstatus & 0xf) != 0x3)
34bf2170
TH
6456 return 1;
6457 return 0;
6458}
0baab86b 6459
77b08fb5 6460int ata_flush_cache(struct ata_device *dev)
9b847548 6461{
977e6b9f 6462 unsigned int err_mask;
9b847548
JA
6463 u8 cmd;
6464
6465 if (!ata_try_flush_cache(dev))
6466 return 0;
6467
6fc49adb 6468 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6469 cmd = ATA_CMD_FLUSH_EXT;
6470 else
6471 cmd = ATA_CMD_FLUSH;
6472
4f34337b
AC
6473 /* This is wrong. On a failed flush we get back the LBA of the lost
6474 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6475 a further flush command to continue the writeback until it
4f34337b 6476 does not error */
977e6b9f
TH
6477 err_mask = ata_do_simple_cmd(dev, cmd);
6478 if (err_mask) {
6479 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6480 return -EIO;
6481 }
6482
6483 return 0;
9b847548
JA
6484}
6485
6ffa01d8 6486#ifdef CONFIG_PM
cca3974e
JG
6487static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6488 unsigned int action, unsigned int ehi_flags,
6489 int wait)
500530f6
TH
6490{
6491 unsigned long flags;
6492 int i, rc;
6493
cca3974e
JG
6494 for (i = 0; i < host->n_ports; i++) {
6495 struct ata_port *ap = host->ports[i];
e3667ebf 6496 struct ata_link *link;
500530f6
TH
6497
6498 /* Previous resume operation might still be in
6499 * progress. Wait for PM_PENDING to clear.
6500 */
6501 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6502 ata_port_wait_eh(ap);
6503 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6504 }
6505
6506 /* request PM ops to EH */
6507 spin_lock_irqsave(ap->lock, flags);
6508
6509 ap->pm_mesg = mesg;
6510 if (wait) {
6511 rc = 0;
6512 ap->pm_result = &rc;
6513 }
6514
6515 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6516 __ata_port_for_each_link(link, ap) {
6517 link->eh_info.action |= action;
6518 link->eh_info.flags |= ehi_flags;
6519 }
500530f6
TH
6520
6521 ata_port_schedule_eh(ap);
6522
6523 spin_unlock_irqrestore(ap->lock, flags);
6524
6525 /* wait and check result */
6526 if (wait) {
6527 ata_port_wait_eh(ap);
6528 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6529 if (rc)
6530 return rc;
6531 }
6532 }
6533
6534 return 0;
6535}
6536
6537/**
cca3974e
JG
6538 * ata_host_suspend - suspend host
6539 * @host: host to suspend
500530f6
TH
6540 * @mesg: PM message
6541 *
cca3974e 6542 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6543 * function requests EH to perform PM operations and waits for EH
6544 * to finish.
6545 *
6546 * LOCKING:
6547 * Kernel thread context (may sleep).
6548 *
6549 * RETURNS:
6550 * 0 on success, -errno on failure.
6551 */
cca3974e 6552int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6553{
9666f400 6554 int rc;
500530f6 6555
ca77329f
KCA
6556 /*
6557 * disable link pm on all ports before requesting
6558 * any pm activity
6559 */
6560 ata_lpm_enable(host);
6561
cca3974e 6562 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6563 if (rc == 0)
6564 host->dev->power.power_state = mesg;
500530f6
TH
6565 return rc;
6566}
6567
6568/**
cca3974e
JG
6569 * ata_host_resume - resume host
6570 * @host: host to resume
500530f6 6571 *
cca3974e 6572 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6573 * function requests EH to perform PM operations and returns.
6574 * Note that all resume operations are performed parallely.
6575 *
6576 * LOCKING:
6577 * Kernel thread context (may sleep).
6578 */
cca3974e 6579void ata_host_resume(struct ata_host *host)
500530f6 6580{
cca3974e
JG
6581 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6582 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6583 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
6584
6585 /* reenable link pm */
6586 ata_lpm_disable(host);
500530f6 6587}
6ffa01d8 6588#endif
500530f6 6589
c893a3ae
RD
6590/**
6591 * ata_port_start - Set port up for dma.
6592 * @ap: Port to initialize
6593 *
6594 * Called just after data structures for each port are
6595 * initialized. Allocates space for PRD table.
6596 *
6597 * May be used as the port_start() entry in ata_port_operations.
6598 *
6599 * LOCKING:
6600 * Inherited from caller.
6601 */
f0d36efd 6602int ata_port_start(struct ata_port *ap)
1da177e4 6603{
2f1f610b 6604 struct device *dev = ap->dev;
1da177e4 6605
f0d36efd
TH
6606 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6607 GFP_KERNEL);
1da177e4
LT
6608 if (!ap->prd)
6609 return -ENOMEM;
6610
1da177e4
LT
6611 return 0;
6612}
6613
3ef3b43d
TH
6614/**
6615 * ata_dev_init - Initialize an ata_device structure
6616 * @dev: Device structure to initialize
6617 *
6618 * Initialize @dev in preparation for probing.
6619 *
6620 * LOCKING:
6621 * Inherited from caller.
6622 */
6623void ata_dev_init(struct ata_device *dev)
6624{
9af5c9c9
TH
6625 struct ata_link *link = dev->link;
6626 struct ata_port *ap = link->ap;
72fa4b74
TH
6627 unsigned long flags;
6628
5a04bf4b 6629 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6630 link->sata_spd_limit = link->hw_sata_spd_limit;
6631 link->sata_spd = 0;
5a04bf4b 6632
72fa4b74
TH
6633 /* High bits of dev->flags are used to record warm plug
6634 * requests which occur asynchronously. Synchronize using
cca3974e 6635 * host lock.
72fa4b74 6636 */
ba6a1308 6637 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6638 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6639 dev->horkage = 0;
ba6a1308 6640 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6641
72fa4b74
TH
6642 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6643 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6644 dev->pio_mask = UINT_MAX;
6645 dev->mwdma_mask = UINT_MAX;
6646 dev->udma_mask = UINT_MAX;
6647}
6648
4fb37a25
TH
6649/**
6650 * ata_link_init - Initialize an ata_link structure
6651 * @ap: ATA port link is attached to
6652 * @link: Link structure to initialize
8989805d 6653 * @pmp: Port multiplier port number
4fb37a25
TH
6654 *
6655 * Initialize @link.
6656 *
6657 * LOCKING:
6658 * Kernel thread context (may sleep)
6659 */
fb7fd614 6660void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6661{
6662 int i;
6663
6664 /* clear everything except for devices */
6665 memset(link, 0, offsetof(struct ata_link, device[0]));
6666
6667 link->ap = ap;
8989805d 6668 link->pmp = pmp;
4fb37a25
TH
6669 link->active_tag = ATA_TAG_POISON;
6670 link->hw_sata_spd_limit = UINT_MAX;
6671
6672 /* can't use iterator, ap isn't initialized yet */
6673 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6674 struct ata_device *dev = &link->device[i];
6675
6676 dev->link = link;
6677 dev->devno = dev - link->device;
6678 ata_dev_init(dev);
6679 }
6680}
6681
6682/**
6683 * sata_link_init_spd - Initialize link->sata_spd_limit
6684 * @link: Link to configure sata_spd_limit for
6685 *
6686 * Initialize @link->[hw_]sata_spd_limit to the currently
6687 * configured value.
6688 *
6689 * LOCKING:
6690 * Kernel thread context (may sleep).
6691 *
6692 * RETURNS:
6693 * 0 on success, -errno on failure.
6694 */
fb7fd614 6695int sata_link_init_spd(struct ata_link *link)
4fb37a25 6696{
33267325
TH
6697 u32 scontrol;
6698 u8 spd;
4fb37a25
TH
6699 int rc;
6700
6701 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6702 if (rc)
6703 return rc;
6704
6705 spd = (scontrol >> 4) & 0xf;
6706 if (spd)
6707 link->hw_sata_spd_limit &= (1 << spd) - 1;
6708
33267325
TH
6709 ata_force_spd_limit(link);
6710
4fb37a25
TH
6711 link->sata_spd_limit = link->hw_sata_spd_limit;
6712
6713 return 0;
6714}
6715
1da177e4 6716/**
f3187195
TH
6717 * ata_port_alloc - allocate and initialize basic ATA port resources
6718 * @host: ATA host this allocated port belongs to
1da177e4 6719 *
f3187195
TH
6720 * Allocate and initialize basic ATA port resources.
6721 *
6722 * RETURNS:
6723 * Allocate ATA port on success, NULL on failure.
0cba632b 6724 *
1da177e4 6725 * LOCKING:
f3187195 6726 * Inherited from calling layer (may sleep).
1da177e4 6727 */
f3187195 6728struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6729{
f3187195 6730 struct ata_port *ap;
1da177e4 6731
f3187195
TH
6732 DPRINTK("ENTER\n");
6733
6734 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6735 if (!ap)
6736 return NULL;
6737
f4d6d004 6738 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6739 ap->lock = &host->lock;
198e0fed 6740 ap->flags = ATA_FLAG_DISABLED;
f3187195 6741 ap->print_id = -1;
1da177e4 6742 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6743 ap->host = host;
f3187195 6744 ap->dev = host->dev;
1da177e4 6745 ap->last_ctl = 0xFF;
bd5d825c
BP
6746
6747#if defined(ATA_VERBOSE_DEBUG)
6748 /* turn on all debugging levels */
6749 ap->msg_enable = 0x00FF;
6750#elif defined(ATA_DEBUG)
6751 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6752#else
0dd4b21f 6753 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6754#endif
1da177e4 6755
442eacc3 6756 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
65f27f38
DH
6757 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6758 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6759 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6760 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6761 init_timer_deferrable(&ap->fastdrain_timer);
6762 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6763 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6764
838df628 6765 ap->cbl = ATA_CBL_NONE;
838df628 6766
8989805d 6767 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6768
6769#ifdef ATA_IRQ_TRAP
6770 ap->stats.unhandled_irq = 1;
6771 ap->stats.idle_irq = 1;
6772#endif
1da177e4 6773 return ap;
1da177e4
LT
6774}
6775
f0d36efd
TH
6776static void ata_host_release(struct device *gendev, void *res)
6777{
6778 struct ata_host *host = dev_get_drvdata(gendev);
6779 int i;
6780
1aa506e4
TH
6781 for (i = 0; i < host->n_ports; i++) {
6782 struct ata_port *ap = host->ports[i];
6783
4911487a
TH
6784 if (!ap)
6785 continue;
6786
6787 if (ap->scsi_host)
1aa506e4
TH
6788 scsi_host_put(ap->scsi_host);
6789
633273a3 6790 kfree(ap->pmp_link);
4911487a 6791 kfree(ap);
1aa506e4
TH
6792 host->ports[i] = NULL;
6793 }
6794
1aa56cca 6795 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6796}
6797
f3187195
TH
6798/**
6799 * ata_host_alloc - allocate and init basic ATA host resources
6800 * @dev: generic device this host is associated with
6801 * @max_ports: maximum number of ATA ports associated with this host
6802 *
6803 * Allocate and initialize basic ATA host resources. LLD calls
6804 * this function to allocate a host, initializes it fully and
6805 * attaches it using ata_host_register().
6806 *
6807 * @max_ports ports are allocated and host->n_ports is
6808 * initialized to @max_ports. The caller is allowed to decrease
6809 * host->n_ports before calling ata_host_register(). The unused
6810 * ports will be automatically freed on registration.
6811 *
6812 * RETURNS:
6813 * Allocate ATA host on success, NULL on failure.
6814 *
6815 * LOCKING:
6816 * Inherited from calling layer (may sleep).
6817 */
6818struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6819{
6820 struct ata_host *host;
6821 size_t sz;
6822 int i;
6823
6824 DPRINTK("ENTER\n");
6825
6826 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6827 return NULL;
6828
6829 /* alloc a container for our list of ATA ports (buses) */
6830 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6831 /* alloc a container for our list of ATA ports (buses) */
6832 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6833 if (!host)
6834 goto err_out;
6835
6836 devres_add(dev, host);
6837 dev_set_drvdata(dev, host);
6838
6839 spin_lock_init(&host->lock);
6840 host->dev = dev;
6841 host->n_ports = max_ports;
6842
6843 /* allocate ports bound to this host */
6844 for (i = 0; i < max_ports; i++) {
6845 struct ata_port *ap;
6846
6847 ap = ata_port_alloc(host);
6848 if (!ap)
6849 goto err_out;
6850
6851 ap->port_no = i;
6852 host->ports[i] = ap;
6853 }
6854
6855 devres_remove_group(dev, NULL);
6856 return host;
6857
6858 err_out:
6859 devres_release_group(dev, NULL);
6860 return NULL;
6861}
6862
f5cda257
TH
6863/**
6864 * ata_host_alloc_pinfo - alloc host and init with port_info array
6865 * @dev: generic device this host is associated with
6866 * @ppi: array of ATA port_info to initialize host with
6867 * @n_ports: number of ATA ports attached to this host
6868 *
6869 * Allocate ATA host and initialize with info from @ppi. If NULL
6870 * terminated, @ppi may contain fewer entries than @n_ports. The
6871 * last entry will be used for the remaining ports.
6872 *
6873 * RETURNS:
6874 * Allocate ATA host on success, NULL on failure.
6875 *
6876 * LOCKING:
6877 * Inherited from calling layer (may sleep).
6878 */
6879struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6880 const struct ata_port_info * const * ppi,
6881 int n_ports)
6882{
6883 const struct ata_port_info *pi;
6884 struct ata_host *host;
6885 int i, j;
6886
6887 host = ata_host_alloc(dev, n_ports);
6888 if (!host)
6889 return NULL;
6890
6891 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6892 struct ata_port *ap = host->ports[i];
6893
6894 if (ppi[j])
6895 pi = ppi[j++];
6896
6897 ap->pio_mask = pi->pio_mask;
6898 ap->mwdma_mask = pi->mwdma_mask;
6899 ap->udma_mask = pi->udma_mask;
6900 ap->flags |= pi->flags;
0c88758b 6901 ap->link.flags |= pi->link_flags;
f5cda257
TH
6902 ap->ops = pi->port_ops;
6903
6904 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6905 host->ops = pi->port_ops;
6906 if (!host->private_data && pi->private_data)
6907 host->private_data = pi->private_data;
6908 }
6909
6910 return host;
6911}
6912
32ebbc0c
TH
6913static void ata_host_stop(struct device *gendev, void *res)
6914{
6915 struct ata_host *host = dev_get_drvdata(gendev);
6916 int i;
6917
6918 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6919
6920 for (i = 0; i < host->n_ports; i++) {
6921 struct ata_port *ap = host->ports[i];
6922
6923 if (ap->ops->port_stop)
6924 ap->ops->port_stop(ap);
6925 }
6926
6927 if (host->ops->host_stop)
6928 host->ops->host_stop(host);
6929}
6930
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, it's set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* idempotent: nothing to do if already started */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	/* first pass: pick host->ops and find out whether any stop
	 * callback exists at all
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the devres entry up front so the stop path cannot
	 * fail later; it is only registered once all ports started
	 */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	/* second pass: actually start and freeze each port */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				/* -ENODEV is a quiet "port absent", not an error */
				if (rc != -ENODEV)
					dev_printk(KERN_ERR, host->dev,
						   "failed to start port %d "
						   "(errno=%d)\n", i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop the ports that were started before port i failed */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	/* devres_free() is a no-op on NULL */
	devres_free(start_dr);
	return rc;
}
7006
b03732f0 7007/**
cca3974e
JG
7008 * ata_sas_host_init - Initialize a host struct
7009 * @host: host to initialize
7010 * @dev: device host is attached to
7011 * @flags: host flags
7012 * @ops: port_ops
b03732f0
BK
7013 *
7014 * LOCKING:
7015 * PCI/etc. bus probe sem.
7016 *
7017 */
f3187195 7018/* KILLME - the only user left is ipr */
cca3974e
JG
7019void ata_host_init(struct ata_host *host, struct device *dev,
7020 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 7021{
cca3974e
JG
7022 spin_lock_init(&host->lock);
7023 host->dev = dev;
7024 host->flags = flags;
7025 host->ops = ops;
b03732f0
BK
7026}
7027
f3187195
TH
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.  Relies on ports[] being NULL-terminated
	 * past n_ports; presumably guaranteed by ata_host_alloc() —
	 * TODO confirm against the allocator.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* probe: new-style (EH-driven) ports are probed by
		 * scheduling EH with a full probe_mask; legacy ports
		 * go through ata_bus_probe()
		 */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			/* one bit per possible device on the link */
			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
		ata_lpm_schedule(ap, ap->pm_policy);
	}

	return 0;
}
7156
f5cda257
TH
7157/**
7158 * ata_host_activate - start host, request IRQ and register it
7159 * @host: target ATA host
7160 * @irq: IRQ to request
7161 * @irq_handler: irq_handler used when requesting IRQ
7162 * @irq_flags: irq_flags used when requesting IRQ
7163 * @sht: scsi_host_template to use when registering the host
7164 *
7165 * After allocating an ATA host and initializing it, most libata
7166 * LLDs perform three steps to activate the host - start host,
7167 * request IRQ and register it. This helper takes necessasry
7168 * arguments and performs the three steps in one go.
7169 *
3d46b2e2
PM
7170 * An invalid IRQ skips the IRQ registration and expects the host to
7171 * have set polling mode on the port. In this case, @irq_handler
7172 * should be NULL.
7173 *
f5cda257
TH
7174 * LOCKING:
7175 * Inherited from calling layer (may sleep).
7176 *
7177 * RETURNS:
7178 * 0 on success, -errno otherwise.
7179 */
7180int ata_host_activate(struct ata_host *host, int irq,
7181 irq_handler_t irq_handler, unsigned long irq_flags,
7182 struct scsi_host_template *sht)
7183{
cbcdd875 7184 int i, rc;
f5cda257
TH
7185
7186 rc = ata_host_start(host);
7187 if (rc)
7188 return rc;
7189
3d46b2e2
PM
7190 /* Special case for polling mode */
7191 if (!irq) {
7192 WARN_ON(irq_handler);
7193 return ata_host_register(host, sht);
7194 }
7195
f5cda257
TH
7196 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7197 dev_driver_string(host->dev), host);
7198 if (rc)
7199 return rc;
7200
cbcdd875
TH
7201 for (i = 0; i < host->n_ports; i++)
7202 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7203
f5cda257
TH
7204 rc = ata_host_register(host, sht);
7205 /* if failed, just free the IRQ and leave ports alone */
7206 if (rc)
7207 devm_free_irq(host->dev, irq, host);
7208
7209 return rc;
7210}
7211
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* legacy (non-EH) ports have no EH machinery to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING - EH context belongs
	 * to us.  Disable all existing devices.
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* hotplug work must not run after the port is gone */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
7262
0529c159
TH
7263/**
7264 * ata_host_detach - Detach all ports of an ATA host
7265 * @host: Host to detach
7266 *
7267 * Detach all ports of @host.
7268 *
7269 * LOCKING:
7270 * Kernel thread context (may sleep).
7271 */
7272void ata_host_detach(struct ata_host *host)
7273{
7274 int i;
7275
7276 for (i = 0; i < host->n_ports; i++)
7277 ata_port_detach(host->ports[i]);
562f0c2d
TH
7278
7279 /* the host is dead now, dissociate ACPI */
7280 ata_acpi_dissociate(host);
0529c159
TH
7281}
7282
1da177e4
LT
7283/**
7284 * ata_std_ports - initialize ioaddr with standard port offsets.
7285 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7286 *
7287 * Utility function which initializes data_addr, error_addr,
7288 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7289 * device_addr, status_addr, and command_addr to standard offsets
7290 * relative to cmd_addr.
7291 *
7292 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7293 */
0baab86b 7294
1da177e4
LT
7295void ata_std_ports(struct ata_ioports *ioaddr)
7296{
7297 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7298 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7299 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7300 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7301 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7302 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7303 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7304 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7305 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7306 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7307}
7308
0baab86b 7309
374b1873
JG
7310#ifdef CONFIG_PCI
7311
1da177e4
LT
7312/**
7313 * ata_pci_remove_one - PCI layer callback for device removal
7314 * @pdev: PCI device that was removed
7315 *
b878ca5d
TH
7316 * PCI layer indicates to libata via this hook that hot-unplug or
7317 * module unload event has occurred. Detach all ports. Resource
7318 * release is handled via devres.
1da177e4
LT
7319 *
7320 * LOCKING:
7321 * Inherited from PCI layer (may sleep).
7322 */
f0d36efd 7323void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7324{
2855568b 7325 struct device *dev = &pdev->dev;
cca3974e 7326 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7327
b878ca5d 7328 ata_host_detach(host);
1da177e4
LT
7329}
7330
7331/* move to PCI subsystem */
057ace5e 7332int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7333{
7334 unsigned long tmp = 0;
7335
7336 switch (bits->width) {
7337 case 1: {
7338 u8 tmp8 = 0;
7339 pci_read_config_byte(pdev, bits->reg, &tmp8);
7340 tmp = tmp8;
7341 break;
7342 }
7343 case 2: {
7344 u16 tmp16 = 0;
7345 pci_read_config_word(pdev, bits->reg, &tmp16);
7346 tmp = tmp16;
7347 break;
7348 }
7349 case 4: {
7350 u32 tmp32 = 0;
7351 pci_read_config_dword(pdev, bits->reg, &tmp32);
7352 tmp = tmp32;
7353 break;
7354 }
7355
7356 default:
7357 return -EINVAL;
7358 }
7359
7360 tmp &= bits->mask;
7361
7362 return (tmp == bits->val) ? 1 : 0;
7363}
9b847548 7364
6ffa01d8 7365#ifdef CONFIG_PM
3c5100c1 7366void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
7367{
7368 pci_save_state(pdev);
4c90d971 7369 pci_disable_device(pdev);
500530f6 7370
4c90d971 7371 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 7372 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
7373}
7374
553c4aa6 7375int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7376{
553c4aa6
TH
7377 int rc;
7378
9b847548
JA
7379 pci_set_power_state(pdev, PCI_D0);
7380 pci_restore_state(pdev);
553c4aa6 7381
b878ca5d 7382 rc = pcim_enable_device(pdev);
553c4aa6
TH
7383 if (rc) {
7384 dev_printk(KERN_ERR, &pdev->dev,
7385 "failed to enable device after resume (%d)\n", rc);
7386 return rc;
7387 }
7388
9b847548 7389 pci_set_master(pdev);
553c4aa6 7390 return 0;
500530f6
TH
7391}
7392
3c5100c1 7393int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7394{
cca3974e 7395 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7396 int rc = 0;
7397
cca3974e 7398 rc = ata_host_suspend(host, mesg);
500530f6
TH
7399 if (rc)
7400 return rc;
7401
3c5100c1 7402 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7403
7404 return 0;
7405}
7406
7407int ata_pci_device_resume(struct pci_dev *pdev)
7408{
cca3974e 7409 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7410 int rc;
500530f6 7411
553c4aa6
TH
7412 rc = ata_pci_device_do_resume(pdev);
7413 if (rc == 0)
7414 ata_host_resume(host);
7415 return rc;
9b847548 7416}
6ffa01d8
TH
7417#endif /* CONFIG_PM */
7418
1da177e4
LT
7419#endif /* CONFIG_PCI */
7420
33267325
TH
7421static int __init ata_parse_force_one(char **cur,
7422 struct ata_force_ent *force_ent,
7423 const char **reason)
7424{
7425 /* FIXME: Currently, there's no way to tag init const data and
7426 * using __initdata causes build failure on some versions of
7427 * gcc. Once __initdataconst is implemented, add const to the
7428 * following structure.
7429 */
7430 static struct ata_force_param force_tbl[] __initdata = {
7431 { "40c", .cbl = ATA_CBL_PATA40 },
7432 { "80c", .cbl = ATA_CBL_PATA80 },
7433 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
7434 { "unk", .cbl = ATA_CBL_PATA_UNK },
7435 { "ign", .cbl = ATA_CBL_PATA_IGN },
7436 { "sata", .cbl = ATA_CBL_SATA },
7437 { "1.5Gbps", .spd_limit = 1 },
7438 { "3.0Gbps", .spd_limit = 2 },
7439 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
7440 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
7441 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
7442 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
7443 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
7444 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
7445 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
7446 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
7447 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
7448 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
7449 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
7450 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
7451 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
7452 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
7453 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7454 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7455 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7456 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7457 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7458 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7459 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7460 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7461 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7462 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7463 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7464 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7465 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7466 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7467 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7468 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7469 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7470 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7471 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7472 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7473 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7474 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
7475 };
7476 char *start = *cur, *p = *cur;
7477 char *id, *val, *endp;
7478 const struct ata_force_param *match_fp = NULL;
7479 int nr_matches = 0, i;
7480
7481 /* find where this param ends and update *cur */
7482 while (*p != '\0' && *p != ',')
7483 p++;
7484
7485 if (*p == '\0')
7486 *cur = p;
7487 else
7488 *cur = p + 1;
7489
7490 *p = '\0';
7491
7492 /* parse */
7493 p = strchr(start, ':');
7494 if (!p) {
7495 val = strstrip(start);
7496 goto parse_val;
7497 }
7498 *p = '\0';
7499
7500 id = strstrip(start);
7501 val = strstrip(p + 1);
7502
7503 /* parse id */
7504 p = strchr(id, '.');
7505 if (p) {
7506 *p++ = '\0';
7507 force_ent->device = simple_strtoul(p, &endp, 10);
7508 if (p == endp || *endp != '\0') {
7509 *reason = "invalid device";
7510 return -EINVAL;
7511 }
7512 }
7513
7514 force_ent->port = simple_strtoul(id, &endp, 10);
7515 if (p == endp || *endp != '\0') {
7516 *reason = "invalid port/link";
7517 return -EINVAL;
7518 }
7519
7520 parse_val:
7521 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
7522 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
7523 const struct ata_force_param *fp = &force_tbl[i];
7524
7525 if (strncasecmp(val, fp->name, strlen(val)))
7526 continue;
7527
7528 nr_matches++;
7529 match_fp = fp;
7530
7531 if (strcasecmp(val, fp->name) == 0) {
7532 nr_matches = 1;
7533 break;
7534 }
7535 }
7536
7537 if (!nr_matches) {
7538 *reason = "unknown value";
7539 return -EINVAL;
7540 }
7541 if (nr_matches > 1) {
7542 *reason = "ambigious value";
7543 return -EINVAL;
7544 }
7545
7546 force_ent->param = *match_fp;
7547
7548 return 0;
7549}
7550
7551static void __init ata_parse_force_param(void)
7552{
7553 int idx = 0, size = 1;
7554 int last_port = -1, last_device = -1;
7555 char *p, *cur, *next;
7556
7557 /* calculate maximum number of params and allocate force_tbl */
7558 for (p = ata_force_param_buf; *p; p++)
7559 if (*p == ',')
7560 size++;
7561
7562 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
7563 if (!ata_force_tbl) {
7564 printk(KERN_WARNING "ata: failed to extend force table, "
7565 "libata.force ignored\n");
7566 return;
7567 }
7568
7569 /* parse and populate the table */
7570 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7571 const char *reason = "";
7572 struct ata_force_ent te = { .port = -1, .device = -1 };
7573
7574 next = cur;
7575 if (ata_parse_force_one(&next, &te, &reason)) {
7576 printk(KERN_WARNING "ata: failed to parse force "
7577 "parameter \"%s\" (%s)\n",
7578 cur, reason);
7579 continue;
7580 }
7581
7582 if (te.port == -1) {
7583 te.port = last_port;
7584 te.device = last_device;
7585 }
7586
7587 ata_force_tbl[idx++] = te;
7588
7589 last_port = te.port;
7590 last_device = te.device;
7591 }
7592
7593 ata_force_tbl_size = idx;
7594}
1da177e4 7595
1da177e4
LT
7596static int __init ata_init(void)
7597{
a8601e5f 7598 ata_probe_timeout *= HZ;
33267325
TH
7599
7600 ata_parse_force_param();
7601
1da177e4
LT
7602 ata_wq = create_workqueue("ata");
7603 if (!ata_wq)
7604 return -ENOMEM;
7605
453b07ac
TH
7606 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7607 if (!ata_aux_wq) {
7608 destroy_workqueue(ata_wq);
7609 return -ENOMEM;
7610 }
7611
1da177e4
LT
7612 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7613 return 0;
7614}
7615
/* Module exit: release the force table and both workqueues.
 * NOTE(review): ata_force_tbl is freed before the workqueues are
 * destroyed; presumably no queued work can still consult the force
 * table at unload time — confirm before reordering.
 */
static void __exit ata_exit(void)
{
	kfree(ata_force_tbl);
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
7622
a4625085 7623subsys_initcall(ata_init);
1da177e4
LT
7624module_exit(ata_exit);
7625
67846b30 7626static unsigned long ratelimit_time;
34af946a 7627static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7628
7629int ata_ratelimit(void)
7630{
7631 int rc;
7632 unsigned long flags;
7633
7634 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7635
7636 if (time_after(jiffies, ratelimit_time)) {
7637 rc = 1;
7638 ratelimit_time = jiffies + (HZ/5);
7639 } else
7640 rc = 0;
7641
7642 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7643
7644 return rc;
7645}
7646
c22daff4
TH
7647/**
7648 * ata_wait_register - wait until register value changes
7649 * @reg: IO-mapped register
7650 * @mask: Mask to apply to read register value
7651 * @val: Wait condition
7652 * @interval_msec: polling interval in milliseconds
7653 * @timeout_msec: timeout in milliseconds
7654 *
7655 * Waiting for some bits of register to change is a common
7656 * operation for ATA controllers. This function reads 32bit LE
7657 * IO-mapped register @reg and tests for the following condition.
7658 *
7659 * (*@reg & mask) != val
7660 *
7661 * If the condition is met, it returns; otherwise, the process is
7662 * repeated after @interval_msec until timeout.
7663 *
7664 * LOCKING:
7665 * Kernel thread context (may sleep)
7666 *
7667 * RETURNS:
7668 * The final register value.
7669 */
7670u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7671 unsigned long interval_msec,
7672 unsigned long timeout_msec)
7673{
7674 unsigned long timeout;
7675 u32 tmp;
7676
7677 tmp = ioread32(reg);
7678
7679 /* Calculate timeout _after_ the first read to make sure
7680 * preceding writes reach the controller before starting to
7681 * eat away the timeout.
7682 */
7683 timeout = jiffies + (timeout_msec * HZ) / 1000;
7684
7685 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7686 msleep(interval_msec);
7687 tmp = ioread32(reg);
7688 }
7689
7690 return tmp;
7691}
7692
dd5b06c4
TH
7693/*
7694 * Dummy port_ops
7695 */
7696static void ata_dummy_noret(struct ata_port *ap) { }
7697static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7698static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7699
7700static u8 ata_dummy_check_status(struct ata_port *ap)
7701{
7702 return ATA_DRDY;
7703}
7704
7705static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7706{
7707 return AC_ERR_SYSTEM;
7708}
7709
/* Port operations for a dummy port: status always ready, every
 * command fails with AC_ERR_SYSTEM, all other hooks are no-ops.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* Port info wrapping the dummy ops, for LLDs that skip a port */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7728
1da177e4
LT
7729/*
7730 * libata is essentially a library of internal helper functions for
7731 * low-level ATA host controller drivers. As such, the API/ABI is
7732 * likely to change as new drivers are added and updated.
7733 * Do not depend on ABI/API stability.
7734 */
e9c83914
TH
7735EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7736EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7737EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7738EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7739EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7740EXPORT_SYMBOL_GPL(ata_std_bios_param);
7741EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7742EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7743EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7744EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7745EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7746EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7747EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7748EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 7749EXPORT_SYMBOL_GPL(ata_sg_init);
9a1004d0 7750EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7751EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7752EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7753EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7754EXPORT_SYMBOL_GPL(ata_tf_load);
7755EXPORT_SYMBOL_GPL(ata_tf_read);
7756EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7757EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7758EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
7759EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7760EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
7761EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7762EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7763EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7764EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7765EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7766EXPORT_SYMBOL_GPL(ata_mode_string);
7767EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4
LT
7768EXPORT_SYMBOL_GPL(ata_check_status);
7769EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7770EXPORT_SYMBOL_GPL(ata_exec_command);
7771EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7772EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7773EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7774EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7775EXPORT_SYMBOL_GPL(ata_data_xfer);
7776EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7777EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7778EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7779EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7780EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7781EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7782EXPORT_SYMBOL_GPL(ata_bmdma_start);
7783EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7784EXPORT_SYMBOL_GPL(ata_bmdma_status);
7785EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7786EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7787EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7788EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7789EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7790EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7791EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7792EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7793EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7794EXPORT_SYMBOL_GPL(sata_link_debounce);
7795EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4 7796EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7797EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7798EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7799EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7800EXPORT_SYMBOL_GPL(sata_std_hardreset);
7801EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7802EXPORT_SYMBOL_GPL(ata_dev_classify);
7803EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7804EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7805EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7806EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7807EXPORT_SYMBOL_GPL(ata_busy_sleep);
88ff6eaf 7808EXPORT_SYMBOL_GPL(ata_wait_after_reset);
d4b2bab4 7809EXPORT_SYMBOL_GPL(ata_wait_ready);
1da177e4
LT
7810EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7811EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7812EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7813EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7814EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7815EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7816EXPORT_SYMBOL_GPL(sata_scr_valid);
7817EXPORT_SYMBOL_GPL(sata_scr_read);
7818EXPORT_SYMBOL_GPL(sata_scr_write);
7819EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7820EXPORT_SYMBOL_GPL(ata_link_online);
7821EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7822#ifdef CONFIG_PM
cca3974e
JG
7823EXPORT_SYMBOL_GPL(ata_host_suspend);
7824EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7825#endif /* CONFIG_PM */
6a62a04d
TH
7826EXPORT_SYMBOL_GPL(ata_id_string);
7827EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
7828EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7829
1bc4ccff 7830EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 7831EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
7832EXPORT_SYMBOL_GPL(ata_timing_compute);
7833EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 7834EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 7835
1da177e4
LT
7836#ifdef CONFIG_PCI
7837EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7838EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7839EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7840EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
4e6b79fa 7841EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
1da177e4
LT
7842EXPORT_SYMBOL_GPL(ata_pci_init_one);
7843EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7844#ifdef CONFIG_PM
500530f6
TH
7845EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7846EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7847EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7848EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7849#endif /* CONFIG_PM */
67951ade
AC
7850EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7851EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7852#endif /* CONFIG_PCI */
9b847548 7853
31f88384 7854EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
3af9a77a
TH
7855EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7856EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7857EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7858EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7859
b64bbc39
TH
7860EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7861EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7862EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7863EXPORT_SYMBOL_GPL(ata_port_desc);
7864#ifdef CONFIG_PCI
7865EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7866#endif /* CONFIG_PCI */
7b70fc03 7867EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7868EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7869EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7870EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7871EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7872EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7873EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7874EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7875EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7876EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7877EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7878EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7879
7880EXPORT_SYMBOL_GPL(ata_cable_40wire);
7881EXPORT_SYMBOL_GPL(ata_cable_80wire);
7882EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 7883EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 7884EXPORT_SYMBOL_GPL(ata_cable_sata);