]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
libata: use ULONG_MAX to terminate reset timeout table
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
1da177e4
LT
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
378f058c 57#include <linux/scatterlist.h>
2dcb407e 58#include <linux/io.h>
1da177e4 59#include <scsi/scsi.h>
193515d5 60#include <scsi/scsi_cmnd.h>
1da177e4
LT
61#include <scsi/scsi_host.h>
62#include <linux/libata.h>
1da177e4 63#include <asm/byteorder.h>
140b5e59 64#include <linux/cdrom.h>
1da177e4
LT
65
66#include "libata.h"
67
fda0efc5 68
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* Base port operations shared by every libata driver; drivers inherit
 * from this (or sata_port_ops below) and override what they need. */
const struct ata_port_operations ata_base_port_ops = {
        .prereset               = ata_std_prereset,
        .postreset              = ata_std_postreset,
        .error_handler          = ata_std_error_handler,
};

/* SATA-specific operations layered on top of ata_base_port_ops. */
const struct ata_port_operations sata_port_ops = {
        .inherits               = &ata_base_port_ops,

        .qc_defer               = ata_std_qc_defer,
        .hardreset              = sata_std_hardreset,
};

/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
                                        u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* monotonically increasing ID handed out to ports as they register */
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;         /* main libata workqueue */

struct workqueue_struct *ata_aux_wq;            /* auxiliary (EH/hotplug) workqueue */

/* one parsed "libata.force=" parameter value */
struct ata_force_param {
        const char      *name;
        unsigned int    cbl;            /* forced cable type, ATA_CBL_NONE if unset */
        int             spd_limit;      /* forced SATA speed limit, 0 if unset */
        unsigned long   xfer_mask;      /* forced transfer mask, 0 if unset */
        unsigned int    horkage_on;     /* horkage flags to turn on */
        unsigned int    horkage_off;    /* horkage flags to turn off */
};

/* a force parameter bound to a specific port/device (-1 == wildcard) */
struct ata_force_ent {
        int                     port;
        int                     device;
        struct ata_force_param  param;
};

static struct ata_force_ent *ata_force_tbl;
static int ata_force_tbl_size;

static char ata_force_param_buf[PAGE_SIZE] __initdata;
/* param_buf is thrown away after initialization, disallow read */
module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

static int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

/* ATA_TMOUT_INTERNAL is in msecs; the module parameter is in seconds */
static int ata_probe_timeout = ATA_TMOUT_INTERNAL / 1000;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
0baab86b 164
33267325
TH
165/**
166 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 167 * @ap: ATA port of interest
33267325
TH
168 *
169 * Force cable type according to libata.force and whine about it.
170 * The last entry which has matching port number is used, so it
171 * can be specified as part of device force parameters. For
172 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
173 * same effect.
174 *
175 * LOCKING:
176 * EH context.
177 */
178void ata_force_cbl(struct ata_port *ap)
179{
180 int i;
181
182 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
183 const struct ata_force_ent *fe = &ata_force_tbl[i];
184
185 if (fe->port != -1 && fe->port != ap->print_id)
186 continue;
187
188 if (fe->param.cbl == ATA_CBL_NONE)
189 continue;
190
191 ap->cbl = fe->param.cbl;
192 ata_port_printk(ap, KERN_NOTICE,
193 "FORCE: cable set to %s\n", fe->param.name);
194 return;
195 }
196}
197
198/**
199 * ata_force_spd_limit - force SATA spd limit according to libata.force
200 * @link: ATA link of interest
201 *
202 * Force SATA spd limit according to libata.force and whine about
203 * it. When only the port part is specified (e.g. 1:), the limit
204 * applies to all links connected to both the host link and all
205 * fan-out ports connected via PMP. If the device part is
206 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
207 * link not the host link. Device number 15 always points to the
208 * host link whether PMP is attached or not.
209 *
210 * LOCKING:
211 * EH context.
212 */
213static void ata_force_spd_limit(struct ata_link *link)
214{
215 int linkno, i;
216
217 if (ata_is_host_link(link))
218 linkno = 15;
219 else
220 linkno = link->pmp;
221
222 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
223 const struct ata_force_ent *fe = &ata_force_tbl[i];
224
225 if (fe->port != -1 && fe->port != link->ap->print_id)
226 continue;
227
228 if (fe->device != -1 && fe->device != linkno)
229 continue;
230
231 if (!fe->param.spd_limit)
232 continue;
233
234 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
235 ata_link_printk(link, KERN_NOTICE,
236 "FORCE: PHY spd limit set to %s\n", fe->param.name);
237 return;
238 }
239}
240
241/**
242 * ata_force_xfermask - force xfermask according to libata.force
243 * @dev: ATA device of interest
244 *
245 * Force xfer_mask according to libata.force and whine about it.
246 * For consistency with link selection, device number 15 selects
247 * the first device connected to the host link.
248 *
249 * LOCKING:
250 * EH context.
251 */
252static void ata_force_xfermask(struct ata_device *dev)
253{
254 int devno = dev->link->pmp + dev->devno;
255 int alt_devno = devno;
256 int i;
257
258 /* allow n.15 for the first device attached to host port */
259 if (ata_is_host_link(dev->link) && devno == 0)
260 alt_devno = 15;
261
262 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
263 const struct ata_force_ent *fe = &ata_force_tbl[i];
264 unsigned long pio_mask, mwdma_mask, udma_mask;
265
266 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
267 continue;
268
269 if (fe->device != -1 && fe->device != devno &&
270 fe->device != alt_devno)
271 continue;
272
273 if (!fe->param.xfer_mask)
274 continue;
275
276 ata_unpack_xfermask(fe->param.xfer_mask,
277 &pio_mask, &mwdma_mask, &udma_mask);
278 if (udma_mask)
279 dev->udma_mask = udma_mask;
280 else if (mwdma_mask) {
281 dev->udma_mask = 0;
282 dev->mwdma_mask = mwdma_mask;
283 } else {
284 dev->udma_mask = 0;
285 dev->mwdma_mask = 0;
286 dev->pio_mask = pio_mask;
287 }
288
289 ata_dev_printk(dev, KERN_NOTICE,
290 "FORCE: xfer_mask set to %s\n", fe->param.name);
291 return;
292 }
293}
294
/**
 *      ata_force_horkage - force horkage according to libata.force
 *      @dev: ATA device of interest
 *
 *      Force horkage according to libata.force and whine about it.
 *      For consistency with link selection, device number 15 selects
 *      the first device connected to the host link.
 *
 *      LOCKING:
 *      EH context.
 */
static void ata_force_horkage(struct ata_device *dev)
{
        int devno = dev->link->pmp + dev->devno;
        int alt_devno = devno;
        int i;

        /* allow n.15 for the first device attached to host port */
        if (ata_is_host_link(dev->link) && devno == 0)
                alt_devno = 15;

        /* NOTE: unlike the other ata_force_*() helpers this scans
         * forward and applies every matching entry, so later entries
         * stack on top of earlier ones instead of replacing them. */
        for (i = 0; i < ata_force_tbl_size; i++) {
                const struct ata_force_ent *fe = &ata_force_tbl[i];

                if (fe->port != -1 && fe->port != dev->link->ap->print_id)
                        continue;

                if (fe->device != -1 && fe->device != devno &&
                    fe->device != alt_devno)
                        continue;

                /* skip entries that would leave dev->horkage unchanged */
                if (!(~dev->horkage & fe->param.horkage_on) &&
                    !(dev->horkage & fe->param.horkage_off))
                        continue;

                dev->horkage |= fe->param.horkage_on;
                dev->horkage &= ~fe->param.horkage_off;

                ata_dev_printk(dev, KERN_NOTICE,
                        "FORCE: horkage modified (%s)\n", fe->param.name);
        }
}
337
/**
 *      atapi_cmd_type - Determine ATAPI command type from SCSI opcode
 *      @opcode: SCSI opcode
 *
 *      Determine ATAPI command type from @opcode.
 *
 *      LOCKING:
 *      None.
 *
 *      RETURNS:
 *      ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
 */
int atapi_cmd_type(u8 opcode)
{
        switch (opcode) {
        case GPCMD_READ_10:
        case GPCMD_READ_12:
                return ATAPI_READ;

        case GPCMD_WRITE_10:
        case GPCMD_WRITE_12:
        case GPCMD_WRITE_AND_VERIFY_10:
                return ATAPI_WRITE;

        case GPCMD_READ_CD:
        case GPCMD_READ_CD_MSF:
                return ATAPI_READ_CD;

        case ATA_16:
        case ATA_12:
                /* ATA passthru only when enabled via the
                 * atapi_passthru16 module parameter */
                if (atapi_passthru16)
                        return ATAPI_PASS_THRU;
                /* fall thru */
        default:
                return ATAPI_MISC;
        }
}
375
1da177e4
LT
376/**
377 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
378 * @tf: Taskfile to convert
1da177e4 379 * @pmp: Port multiplier port
9977126c
TH
380 * @is_cmd: This FIS is for command
381 * @fis: Buffer into which data will output
1da177e4
LT
382 *
383 * Converts a standard ATA taskfile to a Serial ATA
384 * FIS structure (Register - Host to Device).
385 *
386 * LOCKING:
387 * Inherited from caller.
388 */
9977126c 389void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 390{
9977126c
TH
391 fis[0] = 0x27; /* Register - Host to Device FIS */
392 fis[1] = pmp & 0xf; /* Port multiplier number*/
393 if (is_cmd)
394 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
395
1da177e4
LT
396 fis[2] = tf->command;
397 fis[3] = tf->feature;
398
399 fis[4] = tf->lbal;
400 fis[5] = tf->lbam;
401 fis[6] = tf->lbah;
402 fis[7] = tf->device;
403
404 fis[8] = tf->hob_lbal;
405 fis[9] = tf->hob_lbam;
406 fis[10] = tf->hob_lbah;
407 fis[11] = tf->hob_feature;
408
409 fis[12] = tf->nsect;
410 fis[13] = tf->hob_nsect;
411 fis[14] = 0;
412 fis[15] = tf->ctl;
413
414 fis[16] = 0;
415 fis[17] = 0;
416 fis[18] = 0;
417 fis[19] = 0;
418}
419
/**
 *      ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 *      @fis: Buffer from which data will be input
 *      @tf: Taskfile to output
 *
 *      Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 *      LOCKING:
 *      Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        /* on a D2H register FIS, bytes 2 and 3 carry status and error */
        tf->command     = fis[2];       /* status */
        tf->feature     = fis[3];       /* error */

        tf->lbal        = fis[4];
        tf->lbam        = fis[5];
        tf->lbah        = fis[6];
        tf->device      = fis[7];

        tf->hob_lbal    = fis[8];
        tf->hob_lbam    = fis[9];
        tf->hob_lbah    = fis[10];

        tf->nsect       = fis[12];
        tf->hob_nsect   = fis[13];
}
448
/* R/W command opcode table, indexed as base + fua + lba48 + write where
 * base is 0 (PIO multi), 8 (PIO single) or 16 (DMA), fua is 0 or 4,
 * lba48 is 0 or 2 and write is 0 or 1.  A zero entry means the
 * combination (e.g. FUA without LBA48, or PIO FUA reads) is invalid. */
static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
478
479/**
8cbd6df1 480 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
481 * @tf: command to examine and configure
482 * @dev: device tf belongs to
1da177e4 483 *
2e9edbf8 484 * Examine the device configuration and tf->flags to calculate
8cbd6df1 485 * the proper read/write commands and protocol to use.
1da177e4
LT
486 *
487 * LOCKING:
488 * caller.
489 */
bd056d7e 490static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 491{
9a3dccc4 492 u8 cmd;
1da177e4 493
9a3dccc4 494 int index, fua, lba48, write;
2e9edbf8 495
9a3dccc4 496 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
497 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
498 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 499
8cbd6df1
AL
500 if (dev->flags & ATA_DFLAG_PIO) {
501 tf->protocol = ATA_PROT_PIO;
9a3dccc4 502 index = dev->multi_count ? 0 : 8;
9af5c9c9 503 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
504 /* Unable to use DMA due to host limitation */
505 tf->protocol = ATA_PROT_PIO;
0565c26d 506 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
507 } else {
508 tf->protocol = ATA_PROT_DMA;
9a3dccc4 509 index = 16;
8cbd6df1 510 }
1da177e4 511
9a3dccc4
TH
512 cmd = ata_rw_cmds[index + fua + lba48 + write];
513 if (cmd) {
514 tf->command = cmd;
515 return 0;
516 }
517 return -1;
1da177e4
LT
518}
519
35b649fe
TH
520/**
521 * ata_tf_read_block - Read block address from ATA taskfile
522 * @tf: ATA taskfile of interest
523 * @dev: ATA device @tf belongs to
524 *
525 * LOCKING:
526 * None.
527 *
528 * Read block address from @tf. This function can handle all
529 * three address formats - LBA, LBA48 and CHS. tf->protocol and
530 * flags select the address format to use.
531 *
532 * RETURNS:
533 * Block address read from @tf.
534 */
535u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
536{
537 u64 block = 0;
538
539 if (tf->flags & ATA_TFLAG_LBA) {
540 if (tf->flags & ATA_TFLAG_LBA48) {
541 block |= (u64)tf->hob_lbah << 40;
542 block |= (u64)tf->hob_lbam << 32;
543 block |= tf->hob_lbal << 24;
544 } else
545 block |= (tf->device & 0xf) << 24;
546
547 block |= tf->lbah << 16;
548 block |= tf->lbam << 8;
549 block |= tf->lbal;
550 } else {
551 u32 cyl, head, sect;
552
553 cyl = tf->lbam | (tf->lbah << 8);
554 head = tf->device & 0xf;
555 sect = tf->lbal;
556
557 block = (cyl * dev->heads + head) * dev->sectors + sect;
558 }
559
560 return block;
561}
562
bd056d7e
TH
/**
 *      ata_build_rw_tf - Build ATA taskfile for given read/write request
 *      @tf: Target ATA taskfile
 *      @dev: ATA device @tf belongs to
 *      @block: Block address
 *      @n_block: Number of blocks
 *      @tf_flags: RW/FUA etc...
 *      @tag: tag
 *
 *      LOCKING:
 *      None.
 *
 *      Build ATA taskfile @tf for read/write request described by
 *      @block, @n_block, @tf_flags and @tag on @dev.
 *
 *      RETURNS:
 *
 *      0 on success, -ERANGE if the request is too large for @dev,
 *      -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                    u64 block, u32 n_block, unsigned int tf_flags,
                    unsigned int tag)
{
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->flags |= tf_flags;

        if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
                /* yay, NCQ */
                if (!lba_48_ok(block, n_block))
                        return -ERANGE;

                tf->protocol = ATA_PROT_NCQ;
                tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

                if (tf->flags & ATA_TFLAG_WRITE)
                        tf->command = ATA_CMD_FPDMA_WRITE;
                else
                        tf->command = ATA_CMD_FPDMA_READ;

                /* for NCQ the tag goes in nsect and the sector count
                 * moves to the feature registers */
                tf->nsect = tag << 3;
                tf->hob_feature = (n_block >> 8) & 0xff;
                tf->feature = n_block & 0xff;

                tf->hob_lbah = (block >> 40) & 0xff;
                tf->hob_lbam = (block >> 32) & 0xff;
                tf->hob_lbal = (block >> 24) & 0xff;
                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device = 1 << 6;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;

                if (lba_28_ok(block, n_block)) {
                        /* use LBA28 */
                        tf->device |= (block >> 24) & 0xf;
                } else if (lba_48_ok(block, n_block)) {
                        if (!(dev->flags & ATA_DFLAG_LBA48))
                                return -ERANGE;

                        /* use LBA48 */
                        tf->flags |= ATA_TFLAG_LBA48;

                        tf->hob_nsect = (n_block >> 8) & 0xff;

                        tf->hob_lbah = (block >> 40) & 0xff;
                        tf->hob_lbam = (block >> 32) & 0xff;
                        tf->hob_lbal = (block >> 24) & 0xff;
                } else
                        /* request too large even for LBA48 */
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                tf->nsect = n_block & 0xff;

                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device |= ATA_LBA;
        } else {
                /* CHS */
                u32 sect, head, cyl, track;

                /* The request -may- be too large for CHS addressing. */
                if (!lba_28_ok(block, n_block))
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                /* Convert LBA to CHS */
                track = (u32)block / dev->sectors;
                cyl   = track / dev->heads;
                head  = track % dev->heads;
                sect  = (u32)block % dev->sectors + 1;

                DPRINTK("block %u track %u cyl %u head %u sect %u\n",
                        (u32)block, track, cyl, head, sect);

                /* Check whether the converted CHS can fit.
                   Cylinder: 0-65535
                   Head: 0-15
                   Sector: 1-255*/
                if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
                        return -ERANGE;

                tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device |= head;
        }

        return 0;
}
685
cb95d562
TH
686/**
687 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
688 * @pio_mask: pio_mask
689 * @mwdma_mask: mwdma_mask
690 * @udma_mask: udma_mask
691 *
692 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
693 * unsigned int xfer_mask.
694 *
695 * LOCKING:
696 * None.
697 *
698 * RETURNS:
699 * Packed xfer_mask.
700 */
7dc951ae
TH
701unsigned long ata_pack_xfermask(unsigned long pio_mask,
702 unsigned long mwdma_mask,
703 unsigned long udma_mask)
cb95d562
TH
704{
705 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
706 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
707 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
708}
709
c0489e4e
TH
710/**
711 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
712 * @xfer_mask: xfer_mask to unpack
713 * @pio_mask: resulting pio_mask
714 * @mwdma_mask: resulting mwdma_mask
715 * @udma_mask: resulting udma_mask
716 *
717 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
718 * Any NULL distination masks will be ignored.
719 */
7dc951ae
TH
720void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
721 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
722{
723 if (pio_mask)
724 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
725 if (mwdma_mask)
726 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
727 if (udma_mask)
728 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
729}
730
/* maps a contiguous range of xfer-mask bits (starting at .shift, .bits
 * wide) onto a contiguous range of XFER_* mode values starting at .base;
 * terminated by a negative .shift */
static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
        { -1, },
};
740
741/**
742 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
743 * @xfer_mask: xfer_mask of interest
744 *
745 * Return matching XFER_* value for @xfer_mask. Only the highest
746 * bit of @xfer_mask is considered.
747 *
748 * LOCKING:
749 * None.
750 *
751 * RETURNS:
70cd071e 752 * Matching XFER_* value, 0xff if no match found.
cb95d562 753 */
7dc951ae 754u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
755{
756 int highbit = fls(xfer_mask) - 1;
757 const struct ata_xfer_ent *ent;
758
759 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
760 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
761 return ent->base + highbit - ent->shift;
70cd071e 762 return 0xff;
cb95d562
TH
763}
764
765/**
766 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
767 * @xfer_mode: XFER_* of interest
768 *
769 * Return matching xfer_mask for @xfer_mode.
770 *
771 * LOCKING:
772 * None.
773 *
774 * RETURNS:
775 * Matching xfer_mask, 0 if no match found.
776 */
7dc951ae 777unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
778{
779 const struct ata_xfer_ent *ent;
780
781 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
782 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
783 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
784 & ~((1 << ent->shift) - 1);
cb95d562
TH
785 return 0;
786}
787
788/**
789 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
790 * @xfer_mode: XFER_* of interest
791 *
792 * Return matching xfer_shift for @xfer_mode.
793 *
794 * LOCKING:
795 * None.
796 *
797 * RETURNS:
798 * Matching xfer_shift, -1 if no match found.
799 */
7dc951ae 800int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
801{
802 const struct ata_xfer_ent *ent;
803
804 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
805 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
806 return ent->shift;
807 return -1;
808}
809
/**
 *      ata_mode_string - convert xfer_mask to string
 *      @xfer_mask: mask of bits supported; only highest bit counts.
 *
 *      Determine string which represents the highest speed
 *      (highest bit in @xfer_mask).
 *
 *      LOCKING:
 *      None.
 *
 *      RETURNS:
 *      Constant C string representing highest speed listed in
 *      @xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "PIO5",
                "PIO6",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "MWDMA3",
                "MWDMA4",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int top = fls(xfer_mask) - 1;   /* index of highest set bit */

        if (top < 0 || top >= (int)ARRAY_SIZE(xfer_mode_str))
                return "<n/a>";
        return xfer_mode_str[top];
}
855
/* Map a SATA link speed number (1-based, from SStatus/SControl) to a
 * human-readable rate string; returns "<unknown>" for out-of-range input. */
static const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
        };
        const unsigned int nr_spd = sizeof(spd_str) / sizeof(spd_str[0]);

        if (spd < 1 || spd > nr_spd)
                return "<unknown>";
        return spd_str[spd - 1];
}
867
/* Take @dev offline: note the event, run the ACPI disable hook, clamp
 * the transfer mode down to PIO0 quietly, and bump dev->class (which
 * marks the device disabled).  No-op if the device is already disabled. */
void ata_dev_disable(struct ata_device *dev)
{
        if (ata_dev_enabled(dev)) {
                if (ata_msg_drv(dev->link->ap))
                        ata_dev_printk(dev, KERN_WARNING, "disabled\n");
                ata_acpi_on_disable(dev);
                ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
                                             ATA_DNXFER_QUIET);
                /* class + 1 is the corresponding *_UNSUP class */
                dev->class++;
        }
}
879
ca77329f
KCA
880static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
881{
882 struct ata_link *link = dev->link;
883 struct ata_port *ap = link->ap;
884 u32 scontrol;
885 unsigned int err_mask;
886 int rc;
887
888 /*
889 * disallow DIPM for drivers which haven't set
890 * ATA_FLAG_IPM. This is because when DIPM is enabled,
891 * phy ready will be set in the interrupt status on
892 * state changes, which will cause some drivers to
893 * think there are errors - additionally drivers will
894 * need to disable hot plug.
895 */
896 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
897 ap->pm_policy = NOT_AVAILABLE;
898 return -EINVAL;
899 }
900
901 /*
902 * For DIPM, we will only enable it for the
903 * min_power setting.
904 *
905 * Why? Because Disks are too stupid to know that
906 * If the host rejects a request to go to SLUMBER
907 * they should retry at PARTIAL, and instead it
908 * just would give up. So, for medium_power to
909 * work at all, we need to only allow HIPM.
910 */
911 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
912 if (rc)
913 return rc;
914
915 switch (policy) {
916 case MIN_POWER:
917 /* no restrictions on IPM transitions */
918 scontrol &= ~(0x3 << 8);
919 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
920 if (rc)
921 return rc;
922
923 /* enable DIPM */
924 if (dev->flags & ATA_DFLAG_DIPM)
925 err_mask = ata_dev_set_feature(dev,
926 SETFEATURES_SATA_ENABLE, SATA_DIPM);
927 break;
928 case MEDIUM_POWER:
929 /* allow IPM to PARTIAL */
930 scontrol &= ~(0x1 << 8);
931 scontrol |= (0x2 << 8);
932 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
933 if (rc)
934 return rc;
935
f5456b63
KCA
936 /*
937 * we don't have to disable DIPM since IPM flags
938 * disallow transitions to SLUMBER, which effectively
939 * disable DIPM if it does not support PARTIAL
940 */
ca77329f
KCA
941 break;
942 case NOT_AVAILABLE:
943 case MAX_PERFORMANCE:
944 /* disable all IPM transitions */
945 scontrol |= (0x3 << 8);
946 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
947 if (rc)
948 return rc;
949
f5456b63
KCA
950 /*
951 * we don't have to disable DIPM since IPM flags
952 * disallow all transitions which effectively
953 * disable DIPM anyway.
954 */
ca77329f
KCA
955 break;
956 }
957
958 /* FIXME: handle SET FEATURES failure */
959 (void) err_mask;
960
961 return 0;
962}
963
964/**
965 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
966 * @dev: device to enable power management
967 * @policy: the link power management policy
ca77329f
KCA
968 *
969 * Enable SATA Interface power management. This will enable
970 * Device Interface Power Management (DIPM) for min_power
971 * policy, and then call driver specific callbacks for
972 * enabling Host Initiated Power management.
973 *
974 * Locking: Caller.
975 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
976 */
977void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
978{
979 int rc = 0;
980 struct ata_port *ap = dev->link->ap;
981
982 /* set HIPM first, then DIPM */
983 if (ap->ops->enable_pm)
984 rc = ap->ops->enable_pm(ap, policy);
985 if (rc)
986 goto enable_pm_out;
987 rc = ata_dev_set_dipm(dev, policy);
988
989enable_pm_out:
990 if (rc)
991 ap->pm_policy = MAX_PERFORMANCE;
992 else
993 ap->pm_policy = policy;
994 return /* rc */; /* hopefully we can use 'rc' eventually */
995}
996
1992a5ed 997#ifdef CONFIG_PM
ca77329f
KCA
/**
 *      ata_dev_disable_pm - disable SATA interface power management
 *      @dev: device to disable power management
 *
 *      Disable SATA Interface power management.  This will disable
 *      Device Interface Power Management (DIPM) without changing
 *      policy, call driver specific callbacks for disabling Host
 *      Initiated Power management.
 *
 *      Locking: Caller.
 *      Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
        struct ata_port *ap = dev->link->ap;

        /* MAX_PERFORMANCE turns off all IPM transitions */
        ata_dev_set_dipm(dev, MAX_PERFORMANCE);
        if (ap->ops->disable_pm)
                ap->ops->disable_pm(ap);
}
1992a5ed 1018#endif /* CONFIG_PM */
ca77329f
KCA
1019
1020void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1021{
1022 ap->pm_policy = policy;
3ec25ebd 1023 ap->link.eh_info.action |= ATA_EH_LPM;
ca77329f
KCA
1024 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1025 ata_port_schedule_eh(ap);
1026}
1027
1992a5ed 1028#ifdef CONFIG_PM
ca77329f
KCA
1029static void ata_lpm_enable(struct ata_host *host)
1030{
1031 struct ata_link *link;
1032 struct ata_port *ap;
1033 struct ata_device *dev;
1034 int i;
1035
1036 for (i = 0; i < host->n_ports; i++) {
1037 ap = host->ports[i];
1038 ata_port_for_each_link(link, ap) {
1039 ata_link_for_each_dev(dev, link)
1040 ata_dev_disable_pm(dev);
1041 }
1042 }
1043}
1044
/* Re-apply each port's stored pm_policy via EH after resume. */
static void ata_lpm_disable(struct ata_host *host)
{
        int i;

        for (i = 0; i < host->n_ports; i++) {
                struct ata_port *ap = host->ports[i];
                ata_lpm_schedule(ap, ap->pm_policy);
        }
}
1992a5ed 1054#endif /* CONFIG_PM */
ca77329f 1055
1da177e4
LT
1056/**
1057 * ata_dev_classify - determine device type based on ATA-spec signature
1058 * @tf: ATA taskfile register set for device to be identified
1059 *
1060 * Determine from taskfile register contents whether a device is
1061 * ATA or ATAPI, as per "Signature and persistence" section
1062 * of ATA/PI spec (volume 1, sect 5.14).
1063 *
1064 * LOCKING:
1065 * None.
1066 *
1067 * RETURNS:
633273a3
TH
1068 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1069 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1070 */
057ace5e 1071unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1072{
1073 /* Apple's open source Darwin code hints that some devices only
1074 * put a proper signature into the LBA mid/high registers,
1075 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1076 *
1077 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1078 * signatures for ATA and ATAPI devices attached on SerialATA,
1079 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1080 * spec has never mentioned about using different signatures
1081 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1082 * Multiplier specification began to use 0x69/0x96 to identify
1083 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1084 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1085 * 0x69/0x96 shortly and described them as reserved for
1086 * SerialATA.
1087 *
1088 * We follow the current spec and consider that 0x69/0x96
1089 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 1090 */
633273a3 1091 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1092 DPRINTK("found ATA device by sig\n");
1093 return ATA_DEV_ATA;
1094 }
1095
633273a3 1096 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1097 DPRINTK("found ATAPI device by sig\n");
1098 return ATA_DEV_ATAPI;
1099 }
1100
633273a3
TH
1101 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1102 DPRINTK("found PMP device by sig\n");
1103 return ATA_DEV_PMP;
1104 }
1105
1106 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 1107 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
1108 return ATA_DEV_SEMB_UNSUP; /* not yet */
1109 }
1110
1da177e4
LT
1111 DPRINTK("unknown device\n");
1112 return ATA_DEV_UNKNOWN;
1113}
1114
1da177e4 1115/**
6a62a04d 1116 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1117 * @id: IDENTIFY DEVICE results we will examine
1118 * @s: string into which data is output
1119 * @ofs: offset into identify device page
1120 * @len: length of string to return. must be an even number.
1121 *
1122 * The strings in the IDENTIFY DEVICE page are broken up into
1123 * 16-bit chunks. Run through the string, and output each
1124 * 8-bit chunk linearly, regardless of platform.
1125 *
1126 * LOCKING:
1127 * caller.
1128 */
1129
6a62a04d
TH
1130void ata_id_string(const u16 *id, unsigned char *s,
1131 unsigned int ofs, unsigned int len)
1da177e4
LT
1132{
1133 unsigned int c;
1134
1135 while (len > 0) {
1136 c = id[ofs] >> 8;
1137 *s = c;
1138 s++;
1139
1140 c = id[ofs] & 0xff;
1141 *s = c;
1142 s++;
1143
1144 ofs++;
1145 len -= 2;
1146 }
1147}
1148
0e949ff3 1149/**
6a62a04d 1150 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1151 * @id: IDENTIFY DEVICE results we will examine
1152 * @s: string into which data is output
1153 * @ofs: offset into identify device page
1154 * @len: length of string to return. must be an odd number.
1155 *
6a62a04d 1156 * This function is identical to ata_id_string except that it
0e949ff3
TH
1157 * trims trailing spaces and terminates the resulting string with
1158 * null. @len must be actual maximum length (even number) + 1.
1159 *
1160 * LOCKING:
1161 * caller.
1162 */
6a62a04d
TH
1163void ata_id_c_string(const u16 *id, unsigned char *s,
1164 unsigned int ofs, unsigned int len)
0e949ff3
TH
1165{
1166 unsigned char *p;
1167
1168 WARN_ON(!(len & 1));
1169
6a62a04d 1170 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1171
1172 p = s + strnlen(s, len - 1);
1173 while (p > s && p[-1] == ' ')
1174 p--;
1175 *p = '\0';
1176}
0baab86b 1177
db6f8759
TH
1178static u64 ata_id_n_sectors(const u16 *id)
1179{
1180 if (ata_id_has_lba(id)) {
1181 if (ata_id_has_lba48(id))
1182 return ata_id_u64(id, 100);
1183 else
1184 return ata_id_u32(id, 60);
1185 } else {
1186 if (ata_id_current_chs_valid(id))
1187 return ata_id_u32(id, 57);
1188 else
1189 return id[1] * id[3] * id[6];
1190 }
1191}
1192
a5987e0a 1193u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1194{
1195 u64 sectors = 0;
1196
1197 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1198 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1199 sectors |= (tf->hob_lbal & 0xff) << 24;
1200 sectors |= (tf->lbah & 0xff) << 16;
1201 sectors |= (tf->lbam & 0xff) << 8;
1202 sectors |= (tf->lbal & 0xff);
1203
a5987e0a 1204 return sectors;
1e999736
AC
1205}
1206
a5987e0a 1207u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1208{
1209 u64 sectors = 0;
1210
1211 sectors |= (tf->device & 0x0f) << 24;
1212 sectors |= (tf->lbah & 0xff) << 16;
1213 sectors |= (tf->lbam & 0xff) << 8;
1214 sectors |= (tf->lbal & 0xff);
1215
a5987e0a 1216 return sectors;
1e999736
AC
1217}
1218
1219/**
c728a914
TH
1220 * ata_read_native_max_address - Read native max address
1221 * @dev: target device
1222 * @max_sectors: out parameter for the result native max address
1e999736 1223 *
c728a914
TH
1224 * Perform an LBA48 or LBA28 native size query upon the device in
1225 * question.
1e999736 1226 *
c728a914
TH
1227 * RETURNS:
1228 * 0 on success, -EACCES if command is aborted by the drive.
1229 * -EIO on other errors.
1e999736 1230 */
c728a914 1231static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1232{
c728a914 1233 unsigned int err_mask;
1e999736 1234 struct ata_taskfile tf;
c728a914 1235 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1236
1237 ata_tf_init(dev, &tf);
1238
c728a914 1239 /* always clear all address registers */
1e999736 1240 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1241
c728a914
TH
1242 if (lba48) {
1243 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1244 tf.flags |= ATA_TFLAG_LBA48;
1245 } else
1246 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1247
1e999736 1248 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1249 tf.device |= ATA_LBA;
1250
2b789108 1251 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1252 if (err_mask) {
1253 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1254 "max address (err_mask=0x%x)\n", err_mask);
1255 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1256 return -EACCES;
1257 return -EIO;
1258 }
1e999736 1259
c728a914 1260 if (lba48)
a5987e0a 1261 *max_sectors = ata_tf_to_lba48(&tf) + 1;
c728a914 1262 else
a5987e0a 1263 *max_sectors = ata_tf_to_lba(&tf) + 1;
2dcb407e 1264 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1265 (*max_sectors)--;
c728a914 1266 return 0;
1e999736
AC
1267}
1268
1269/**
c728a914
TH
1270 * ata_set_max_sectors - Set max sectors
1271 * @dev: target device
6b38d1d1 1272 * @new_sectors: new max sectors value to set for the device
1e999736 1273 *
c728a914
TH
1274 * Set max sectors of @dev to @new_sectors.
1275 *
1276 * RETURNS:
1277 * 0 on success, -EACCES if command is aborted or denied (due to
1278 * previous non-volatile SET_MAX) by the drive. -EIO on other
1279 * errors.
1e999736 1280 */
05027adc 1281static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1282{
c728a914 1283 unsigned int err_mask;
1e999736 1284 struct ata_taskfile tf;
c728a914 1285 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1286
1287 new_sectors--;
1288
1289 ata_tf_init(dev, &tf);
1290
1e999736 1291 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1292
1293 if (lba48) {
1294 tf.command = ATA_CMD_SET_MAX_EXT;
1295 tf.flags |= ATA_TFLAG_LBA48;
1296
1297 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1298 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1299 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1300 } else {
c728a914
TH
1301 tf.command = ATA_CMD_SET_MAX;
1302
1e582ba4
TH
1303 tf.device |= (new_sectors >> 24) & 0xf;
1304 }
1305
1e999736 1306 tf.protocol |= ATA_PROT_NODATA;
c728a914 1307 tf.device |= ATA_LBA;
1e999736
AC
1308
1309 tf.lbal = (new_sectors >> 0) & 0xff;
1310 tf.lbam = (new_sectors >> 8) & 0xff;
1311 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1312
2b789108 1313 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1314 if (err_mask) {
1315 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1316 "max address (err_mask=0x%x)\n", err_mask);
1317 if (err_mask == AC_ERR_DEV &&
1318 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1319 return -EACCES;
1320 return -EIO;
1321 }
1322
c728a914 1323 return 0;
1e999736
AC
1324}
1325
1326/**
1327 * ata_hpa_resize - Resize a device with an HPA set
1328 * @dev: Device to resize
1329 *
1330 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1331 * it if required to the full size of the media. The caller must check
1332 * the drive has the HPA feature set enabled.
05027adc
TH
1333 *
1334 * RETURNS:
1335 * 0 on success, -errno on failure.
1e999736 1336 */
05027adc 1337static int ata_hpa_resize(struct ata_device *dev)
1e999736 1338{
05027adc
TH
1339 struct ata_eh_context *ehc = &dev->link->eh_context;
1340 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1341 u64 sectors = ata_id_n_sectors(dev->id);
1342 u64 native_sectors;
c728a914 1343 int rc;
a617c09f 1344
05027adc
TH
1345 /* do we need to do it? */
1346 if (dev->class != ATA_DEV_ATA ||
1347 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1348 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1349 return 0;
1e999736 1350
05027adc
TH
1351 /* read native max address */
1352 rc = ata_read_native_max_address(dev, &native_sectors);
1353 if (rc) {
dda7aba1
TH
1354 /* If device aborted the command or HPA isn't going to
1355 * be unlocked, skip HPA resizing.
05027adc 1356 */
dda7aba1 1357 if (rc == -EACCES || !ata_ignore_hpa) {
05027adc 1358 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
dda7aba1 1359 "broken, skipping HPA handling\n");
05027adc
TH
1360 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1361
1362 /* we can continue if device aborted the command */
1363 if (rc == -EACCES)
1364 rc = 0;
1e999736 1365 }
37301a55 1366
05027adc
TH
1367 return rc;
1368 }
1369
1370 /* nothing to do? */
1371 if (native_sectors <= sectors || !ata_ignore_hpa) {
1372 if (!print_info || native_sectors == sectors)
1373 return 0;
1374
1375 if (native_sectors > sectors)
1376 ata_dev_printk(dev, KERN_INFO,
1377 "HPA detected: current %llu, native %llu\n",
1378 (unsigned long long)sectors,
1379 (unsigned long long)native_sectors);
1380 else if (native_sectors < sectors)
1381 ata_dev_printk(dev, KERN_WARNING,
1382 "native sectors (%llu) is smaller than "
1383 "sectors (%llu)\n",
1384 (unsigned long long)native_sectors,
1385 (unsigned long long)sectors);
1386 return 0;
1387 }
1388
1389 /* let's unlock HPA */
1390 rc = ata_set_max_sectors(dev, native_sectors);
1391 if (rc == -EACCES) {
1392 /* if device aborted the command, skip HPA resizing */
1393 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1394 "(%llu -> %llu), skipping HPA handling\n",
1395 (unsigned long long)sectors,
1396 (unsigned long long)native_sectors);
1397 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1398 return 0;
1399 } else if (rc)
1400 return rc;
1401
1402 /* re-read IDENTIFY data */
1403 rc = ata_dev_reread_id(dev, 0);
1404 if (rc) {
1405 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1406 "data after HPA resizing\n");
1407 return rc;
1408 }
1409
1410 if (print_info) {
1411 u64 new_sectors = ata_id_n_sectors(dev->id);
1412 ata_dev_printk(dev, KERN_INFO,
1413 "HPA unlocked: %llu -> %llu, native %llu\n",
1414 (unsigned long long)sectors,
1415 (unsigned long long)new_sectors,
1416 (unsigned long long)native_sectors);
1417 }
1418
1419 return 0;
1e999736
AC
1420}
1421
1da177e4
LT
1422/**
1423 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1424 * @id: IDENTIFY DEVICE page to dump
1da177e4 1425 *
0bd3300a
TH
1426 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1427 * page.
1da177e4
LT
1428 *
1429 * LOCKING:
1430 * caller.
1431 */
1432
0bd3300a 1433static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1434{
1435 DPRINTK("49==0x%04x "
1436 "53==0x%04x "
1437 "63==0x%04x "
1438 "64==0x%04x "
1439 "75==0x%04x \n",
0bd3300a
TH
1440 id[49],
1441 id[53],
1442 id[63],
1443 id[64],
1444 id[75]);
1da177e4
LT
1445 DPRINTK("80==0x%04x "
1446 "81==0x%04x "
1447 "82==0x%04x "
1448 "83==0x%04x "
1449 "84==0x%04x \n",
0bd3300a
TH
1450 id[80],
1451 id[81],
1452 id[82],
1453 id[83],
1454 id[84]);
1da177e4
LT
1455 DPRINTK("88==0x%04x "
1456 "93==0x%04x\n",
0bd3300a
TH
1457 id[88],
1458 id[93]);
1da177e4
LT
1459}
1460
cb95d562
TH
1461/**
1462 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1463 * @id: IDENTIFY data to compute xfer mask from
1464 *
1465 * Compute the xfermask for this device. This is not as trivial
1466 * as it seems if we must consider early devices correctly.
1467 *
1468 * FIXME: pre IDE drive timing (do we care ?).
1469 *
1470 * LOCKING:
1471 * None.
1472 *
1473 * RETURNS:
1474 * Computed xfermask
1475 */
7dc951ae 1476unsigned long ata_id_xfermask(const u16 *id)
cb95d562 1477{
7dc951ae 1478 unsigned long pio_mask, mwdma_mask, udma_mask;
cb95d562
TH
1479
1480 /* Usual case. Word 53 indicates word 64 is valid */
1481 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1482 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1483 pio_mask <<= 3;
1484 pio_mask |= 0x7;
1485 } else {
1486 /* If word 64 isn't valid then Word 51 high byte holds
1487 * the PIO timing number for the maximum. Turn it into
1488 * a mask.
1489 */
7a0f1c8a 1490 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1491 if (mode < 5) /* Valid PIO range */
2dcb407e 1492 pio_mask = (2 << mode) - 1;
46767aeb
AC
1493 else
1494 pio_mask = 1;
cb95d562
TH
1495
1496 /* But wait.. there's more. Design your standards by
1497 * committee and you too can get a free iordy field to
1498 * process. However its the speeds not the modes that
1499 * are supported... Note drivers using the timing API
1500 * will get this right anyway
1501 */
1502 }
1503
1504 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1505
b352e57d
AC
1506 if (ata_id_is_cfa(id)) {
1507 /*
1508 * Process compact flash extended modes
1509 */
1510 int pio = id[163] & 0x7;
1511 int dma = (id[163] >> 3) & 7;
1512
1513 if (pio)
1514 pio_mask |= (1 << 5);
1515 if (pio > 1)
1516 pio_mask |= (1 << 6);
1517 if (dma)
1518 mwdma_mask |= (1 << 3);
1519 if (dma > 1)
1520 mwdma_mask |= (1 << 4);
1521 }
1522
fb21f0d0
TH
1523 udma_mask = 0;
1524 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1525 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1526
1527 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1528}
1529
86e45b6b 1530/**
442eacc3 1531 * ata_pio_queue_task - Queue port_task
86e45b6b 1532 * @ap: The ata_port to queue port_task for
e2a7f77a 1533 * @fn: workqueue function to be scheduled
65f27f38 1534 * @data: data for @fn to use
341c2c95 1535 * @delay: delay time in msecs for workqueue function
86e45b6b
TH
1536 *
1537 * Schedule @fn(@data) for execution after @delay jiffies using
1538 * port_task. There is one port_task per port and it's the
1539 * user(low level driver)'s responsibility to make sure that only
1540 * one task is active at any given time.
1541 *
1542 * libata core layer takes care of synchronization between
442eacc3 1543 * port_task and EH. ata_pio_queue_task() may be ignored for EH
86e45b6b
TH
1544 * synchronization.
1545 *
1546 * LOCKING:
1547 * Inherited from caller.
1548 */
624d5c51 1549void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
86e45b6b 1550{
65f27f38 1551 ap->port_task_data = data;
86e45b6b 1552
45a66c1c 1553 /* may fail if ata_port_flush_task() in progress */
341c2c95 1554 queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
86e45b6b
TH
1555}
1556
1557/**
1558 * ata_port_flush_task - Flush port_task
1559 * @ap: The ata_port to flush port_task for
1560 *
1561 * After this function completes, port_task is guranteed not to
1562 * be running or scheduled.
1563 *
1564 * LOCKING:
1565 * Kernel thread context (may sleep)
1566 */
1567void ata_port_flush_task(struct ata_port *ap)
1568{
86e45b6b
TH
1569 DPRINTK("ENTER\n");
1570
45a66c1c 1571 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1572
0dd4b21f 1573 if (ata_msg_ctl(ap))
7f5e4e8d 1574 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
86e45b6b
TH
1575}
1576
7102d230 1577static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1578{
77853bf2 1579 struct completion *waiting = qc->private_data;
a2a7a662 1580
a2a7a662 1581 complete(waiting);
a2a7a662
TH
1582}
1583
1584/**
2432697b 1585 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1586 * @dev: Device to which the command is sent
1587 * @tf: Taskfile registers for the command and the result
d69cf37d 1588 * @cdb: CDB for packet command
a2a7a662 1589 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1590 * @sgl: sg list for the data buffer of the command
2432697b 1591 * @n_elem: Number of sg entries
2b789108 1592 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1593 *
1594 * Executes libata internal command with timeout. @tf contains
1595 * command on entry and result on return. Timeout and error
1596 * conditions are reported via return value. No recovery action
1597 * is taken after a command times out. It's caller's duty to
1598 * clean up after timeout.
1599 *
1600 * LOCKING:
1601 * None. Should be called with kernel context, might sleep.
551e8889
TH
1602 *
1603 * RETURNS:
1604 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1605 */
2432697b
TH
1606unsigned ata_exec_internal_sg(struct ata_device *dev,
1607 struct ata_taskfile *tf, const u8 *cdb,
87260216 1608 int dma_dir, struct scatterlist *sgl,
2b789108 1609 unsigned int n_elem, unsigned long timeout)
a2a7a662 1610{
9af5c9c9
TH
1611 struct ata_link *link = dev->link;
1612 struct ata_port *ap = link->ap;
a2a7a662
TH
1613 u8 command = tf->command;
1614 struct ata_queued_cmd *qc;
2ab7db1f 1615 unsigned int tag, preempted_tag;
dedaf2b0 1616 u32 preempted_sactive, preempted_qc_active;
da917d69 1617 int preempted_nr_active_links;
60be6b9a 1618 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1619 unsigned long flags;
77853bf2 1620 unsigned int err_mask;
d95a717f 1621 int rc;
a2a7a662 1622
ba6a1308 1623 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1624
e3180499 1625 /* no internal command while frozen */
b51e9e5d 1626 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1627 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1628 return AC_ERR_SYSTEM;
1629 }
1630
2ab7db1f 1631 /* initialize internal qc */
a2a7a662 1632
2ab7db1f
TH
1633 /* XXX: Tag 0 is used for drivers with legacy EH as some
1634 * drivers choke if any other tag is given. This breaks
1635 * ata_tag_internal() test for those drivers. Don't use new
1636 * EH stuff without converting to it.
1637 */
1638 if (ap->ops->error_handler)
1639 tag = ATA_TAG_INTERNAL;
1640 else
1641 tag = 0;
1642
6cec4a39 1643 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1644 BUG();
f69499f4 1645 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1646
1647 qc->tag = tag;
1648 qc->scsicmd = NULL;
1649 qc->ap = ap;
1650 qc->dev = dev;
1651 ata_qc_reinit(qc);
1652
9af5c9c9
TH
1653 preempted_tag = link->active_tag;
1654 preempted_sactive = link->sactive;
dedaf2b0 1655 preempted_qc_active = ap->qc_active;
da917d69 1656 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1657 link->active_tag = ATA_TAG_POISON;
1658 link->sactive = 0;
dedaf2b0 1659 ap->qc_active = 0;
da917d69 1660 ap->nr_active_links = 0;
2ab7db1f
TH
1661
1662 /* prepare & issue qc */
a2a7a662 1663 qc->tf = *tf;
d69cf37d
TH
1664 if (cdb)
1665 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1666 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1667 qc->dma_dir = dma_dir;
1668 if (dma_dir != DMA_NONE) {
2432697b 1669 unsigned int i, buflen = 0;
87260216 1670 struct scatterlist *sg;
2432697b 1671
87260216
JA
1672 for_each_sg(sgl, sg, n_elem, i)
1673 buflen += sg->length;
2432697b 1674
87260216 1675 ata_sg_init(qc, sgl, n_elem);
49c80429 1676 qc->nbytes = buflen;
a2a7a662
TH
1677 }
1678
77853bf2 1679 qc->private_data = &wait;
a2a7a662
TH
1680 qc->complete_fn = ata_qc_complete_internal;
1681
8e0e694a 1682 ata_qc_issue(qc);
a2a7a662 1683
ba6a1308 1684 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1685
2b789108 1686 if (!timeout)
341c2c95 1687 timeout = ata_probe_timeout * 1000;
2b789108
TH
1688
1689 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f
TH
1690
1691 ata_port_flush_task(ap);
41ade50c 1692
d95a717f 1693 if (!rc) {
ba6a1308 1694 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1695
1696 /* We're racing with irq here. If we lose, the
1697 * following test prevents us from completing the qc
d95a717f
TH
1698 * twice. If we win, the port is frozen and will be
1699 * cleaned up by ->post_internal_cmd().
a2a7a662 1700 */
77853bf2 1701 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1702 qc->err_mask |= AC_ERR_TIMEOUT;
1703
1704 if (ap->ops->error_handler)
1705 ata_port_freeze(ap);
1706 else
1707 ata_qc_complete(qc);
f15a1daf 1708
0dd4b21f
BP
1709 if (ata_msg_warn(ap))
1710 ata_dev_printk(dev, KERN_WARNING,
88574551 1711 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1712 }
1713
ba6a1308 1714 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1715 }
1716
d95a717f
TH
1717 /* do post_internal_cmd */
1718 if (ap->ops->post_internal_cmd)
1719 ap->ops->post_internal_cmd(qc);
1720
a51d644a
TH
1721 /* perform minimal error analysis */
1722 if (qc->flags & ATA_QCFLAG_FAILED) {
1723 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1724 qc->err_mask |= AC_ERR_DEV;
1725
1726 if (!qc->err_mask)
1727 qc->err_mask |= AC_ERR_OTHER;
1728
1729 if (qc->err_mask & ~AC_ERR_OTHER)
1730 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1731 }
1732
15869303 1733 /* finish up */
ba6a1308 1734 spin_lock_irqsave(ap->lock, flags);
15869303 1735
e61e0672 1736 *tf = qc->result_tf;
77853bf2
TH
1737 err_mask = qc->err_mask;
1738
1739 ata_qc_free(qc);
9af5c9c9
TH
1740 link->active_tag = preempted_tag;
1741 link->sactive = preempted_sactive;
dedaf2b0 1742 ap->qc_active = preempted_qc_active;
da917d69 1743 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1744
1f7dd3e9
TH
1745 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1746 * Until those drivers are fixed, we detect the condition
1747 * here, fail the command with AC_ERR_SYSTEM and reenable the
1748 * port.
1749 *
1750 * Note that this doesn't change any behavior as internal
1751 * command failure results in disabling the device in the
1752 * higher layer for LLDDs without new reset/EH callbacks.
1753 *
1754 * Kill the following code as soon as those drivers are fixed.
1755 */
198e0fed 1756 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1757 err_mask |= AC_ERR_SYSTEM;
1758 ata_port_probe(ap);
1759 }
1760
ba6a1308 1761 spin_unlock_irqrestore(ap->lock, flags);
15869303 1762
77853bf2 1763 return err_mask;
a2a7a662
TH
1764}
1765
2432697b 1766/**
33480a0e 1767 * ata_exec_internal - execute libata internal command
2432697b
TH
1768 * @dev: Device to which the command is sent
1769 * @tf: Taskfile registers for the command and the result
1770 * @cdb: CDB for packet command
1771 * @dma_dir: Data tranfer direction of the command
1772 * @buf: Data buffer of the command
1773 * @buflen: Length of data buffer
2b789108 1774 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1775 *
1776 * Wrapper around ata_exec_internal_sg() which takes simple
1777 * buffer instead of sg list.
1778 *
1779 * LOCKING:
1780 * None. Should be called with kernel context, might sleep.
1781 *
1782 * RETURNS:
1783 * Zero on success, AC_ERR_* mask on failure
1784 */
1785unsigned ata_exec_internal(struct ata_device *dev,
1786 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1787 int dma_dir, void *buf, unsigned int buflen,
1788 unsigned long timeout)
2432697b 1789{
33480a0e
TH
1790 struct scatterlist *psg = NULL, sg;
1791 unsigned int n_elem = 0;
2432697b 1792
33480a0e
TH
1793 if (dma_dir != DMA_NONE) {
1794 WARN_ON(!buf);
1795 sg_init_one(&sg, buf, buflen);
1796 psg = &sg;
1797 n_elem++;
1798 }
2432697b 1799
2b789108
TH
1800 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1801 timeout);
2432697b
TH
1802}
1803
977e6b9f
TH
1804/**
1805 * ata_do_simple_cmd - execute simple internal command
1806 * @dev: Device to which the command is sent
1807 * @cmd: Opcode to execute
1808 *
1809 * Execute a 'simple' command, that only consists of the opcode
1810 * 'cmd' itself, without filling any other registers
1811 *
1812 * LOCKING:
1813 * Kernel thread context (may sleep).
1814 *
1815 * RETURNS:
1816 * Zero on success, AC_ERR_* mask on failure
e58eb583 1817 */
77b08fb5 1818unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1819{
1820 struct ata_taskfile tf;
e58eb583
TH
1821
1822 ata_tf_init(dev, &tf);
1823
1824 tf.command = cmd;
1825 tf.flags |= ATA_TFLAG_DEVICE;
1826 tf.protocol = ATA_PROT_NODATA;
1827
2b789108 1828 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1829}
1830
1bc4ccff
AC
1831/**
1832 * ata_pio_need_iordy - check if iordy needed
1833 * @adev: ATA device
1834 *
1835 * Check if the current speed of the device requires IORDY. Used
1836 * by various controllers for chip configuration.
1837 */
a617c09f 1838
1bc4ccff
AC
1839unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1840{
432729f0
AC
1841 /* Controller doesn't support IORDY. Probably a pointless check
1842 as the caller should know this */
9af5c9c9 1843 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1844 return 0;
432729f0
AC
1845 /* PIO3 and higher it is mandatory */
1846 if (adev->pio_mode > XFER_PIO_2)
1847 return 1;
1848 /* We turn it on when possible */
1849 if (ata_id_has_iordy(adev->id))
1bc4ccff 1850 return 1;
432729f0
AC
1851 return 0;
1852}
2e9edbf8 1853
432729f0
AC
1854/**
1855 * ata_pio_mask_no_iordy - Return the non IORDY mask
1856 * @adev: ATA device
1857 *
1858 * Compute the highest mode possible if we are not using iordy. Return
1859 * -1 if no iordy mode is available.
1860 */
a617c09f 1861
432729f0
AC
1862static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1863{
1bc4ccff 1864 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1865 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1866 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1867 /* Is the speed faster than the drive allows non IORDY ? */
1868 if (pio) {
1869 /* This is cycle times not frequency - watch the logic! */
1870 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1871 return 3 << ATA_SHIFT_PIO;
1872 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1873 }
1874 }
432729f0 1875 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1876}
1877
1da177e4 1878/**
49016aca 1879 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1880 * @dev: target device
1881 * @p_class: pointer to class of the target device (may be changed)
bff04647 1882 * @flags: ATA_READID_* flags
fe635c7e 1883 * @id: buffer to read IDENTIFY data into
1da177e4 1884 *
49016aca
TH
1885 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1886 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1887 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1888 * for pre-ATA4 drives.
1da177e4 1889 *
50a99018 1890 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1891 * now we abort if we hit that case.
50a99018 1892 *
1da177e4 1893 * LOCKING:
49016aca
TH
1894 * Kernel thread context (may sleep)
1895 *
1896 * RETURNS:
1897 * 0 on success, -errno otherwise.
1da177e4 1898 */
a9beec95 1899int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1900 unsigned int flags, u16 *id)
1da177e4 1901{
9af5c9c9 1902 struct ata_port *ap = dev->link->ap;
49016aca 1903 unsigned int class = *p_class;
a0123703 1904 struct ata_taskfile tf;
49016aca
TH
1905 unsigned int err_mask = 0;
1906 const char *reason;
54936f8b 1907 int may_fallback = 1, tried_spinup = 0;
49016aca 1908 int rc;
1da177e4 1909
0dd4b21f 1910 if (ata_msg_ctl(ap))
7f5e4e8d 1911 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1da177e4 1912
49016aca 1913 retry:
3373efd8 1914 ata_tf_init(dev, &tf);
a0123703 1915
49016aca
TH
1916 switch (class) {
1917 case ATA_DEV_ATA:
a0123703 1918 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1919 break;
1920 case ATA_DEV_ATAPI:
a0123703 1921 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1922 break;
1923 default:
1924 rc = -ENODEV;
1925 reason = "unsupported class";
1926 goto err_out;
1da177e4
LT
1927 }
1928
a0123703 1929 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1930
1931 /* Some devices choke if TF registers contain garbage. Make
1932 * sure those are properly initialized.
1933 */
1934 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1935
1936 /* Device presence detection is unreliable on some
1937 * controllers. Always poll IDENTIFY if available.
1938 */
1939 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1940
3373efd8 1941 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 1942 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 1943 if (err_mask) {
800b3996 1944 if (err_mask & AC_ERR_NODEV_HINT) {
1ffc151f
TH
1945 ata_dev_printk(dev, KERN_DEBUG,
1946 "NODEV after polling detection\n");
55a8e2c8
TH
1947 return -ENOENT;
1948 }
1949
1ffc151f
TH
1950 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1951 /* Device or controller might have reported
1952 * the wrong device class. Give a shot at the
1953 * other IDENTIFY if the current one is
1954 * aborted by the device.
1955 */
1956 if (may_fallback) {
1957 may_fallback = 0;
1958
1959 if (class == ATA_DEV_ATA)
1960 class = ATA_DEV_ATAPI;
1961 else
1962 class = ATA_DEV_ATA;
1963 goto retry;
1964 }
1965
1966 /* Control reaches here iff the device aborted
1967 * both flavors of IDENTIFYs which happens
1968 * sometimes with phantom devices.
1969 */
1970 ata_dev_printk(dev, KERN_DEBUG,
1971 "both IDENTIFYs aborted, assuming NODEV\n");
1972 return -ENOENT;
54936f8b
TH
1973 }
1974
49016aca
TH
1975 rc = -EIO;
1976 reason = "I/O error";
1da177e4
LT
1977 goto err_out;
1978 }
1979
54936f8b
TH
1980 /* Falling back doesn't make sense if ID data was read
1981 * successfully at least once.
1982 */
1983 may_fallback = 0;
1984
49016aca 1985 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1986
49016aca 1987 /* sanity check */
a4f5749b 1988 rc = -EINVAL;
6070068b 1989 reason = "device reports invalid type";
a4f5749b
TH
1990
1991 if (class == ATA_DEV_ATA) {
1992 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1993 goto err_out;
1994 } else {
1995 if (ata_id_is_ata(id))
1996 goto err_out;
49016aca
TH
1997 }
1998
169439c2
ML
1999 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2000 tried_spinup = 1;
2001 /*
2002 * Drive powered-up in standby mode, and requires a specific
2003 * SET_FEATURES spin-up subcommand before it will accept
2004 * anything other than the original IDENTIFY command.
2005 */
218f3d30 2006 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 2007 if (err_mask && id[2] != 0x738c) {
169439c2
ML
2008 rc = -EIO;
2009 reason = "SPINUP failed";
2010 goto err_out;
2011 }
2012 /*
2013 * If the drive initially returned incomplete IDENTIFY info,
2014 * we now must reissue the IDENTIFY command.
2015 */
2016 if (id[2] == 0x37c8)
2017 goto retry;
2018 }
2019
bff04647 2020 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
2021 /*
2022 * The exact sequence expected by certain pre-ATA4 drives is:
2023 * SRST RESET
50a99018
AC
2024 * IDENTIFY (optional in early ATA)
2025 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2026 * anything else..
2027 * Some drives were very specific about that exact sequence.
50a99018
AC
2028 *
2029 * Note that ATA4 says lba is mandatory so the second check
2030 * shoud never trigger.
49016aca
TH
2031 */
2032 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2033 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2034 if (err_mask) {
2035 rc = -EIO;
2036 reason = "INIT_DEV_PARAMS failed";
2037 goto err_out;
2038 }
2039
2040 /* current CHS translation info (id[53-58]) might be
2041 * changed. reread the identify device info.
2042 */
bff04647 2043 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2044 goto retry;
2045 }
2046 }
2047
2048 *p_class = class;
fe635c7e 2049
49016aca
TH
2050 return 0;
2051
2052 err_out:
88574551 2053 if (ata_msg_warn(ap))
0dd4b21f 2054 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2055 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2056 return rc;
2057}
2058
3373efd8 2059static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2060{
9af5c9c9
TH
2061 struct ata_port *ap = dev->link->ap;
2062 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2063}
2064
a6e6ce8e
TH
2065static void ata_dev_config_ncq(struct ata_device *dev,
2066 char *desc, size_t desc_sz)
2067{
9af5c9c9 2068 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2069 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2070
2071 if (!ata_id_has_ncq(dev->id)) {
2072 desc[0] = '\0';
2073 return;
2074 }
75683fe7 2075 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2076 snprintf(desc, desc_sz, "NCQ (not used)");
2077 return;
2078 }
a6e6ce8e 2079 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2080 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2081 dev->flags |= ATA_DFLAG_NCQ;
2082 }
2083
2084 if (hdepth >= ddepth)
2085 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2086 else
2087 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2088}
2089
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* nothing to do for a disabled device */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage: merge blacklist quirks and user-forced quirks */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	if (dev->horkage & ATA_HORKAGE_DISABLE) {
		ata_dev_printk(dev, KERN_INFO,
			       "unsupported device, disabling\n");
		ata_dev_disable(dev);
		return 0;
	}

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters so a reconfigure
	 * starts from a clean slate
	 */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59 bit 8 set -> low byte is the valid multi count */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH_EXT only matters beyond the
				 * 28-bit LBA boundary
				 */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads    = id[55];
				dev->sectors  = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!sata_pmp_attached(ap) ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* link power management flags, unless quirked off */
	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	return 0;

err_out_nosup:
	/* only reached for ATAPI devices with an unusable CDB length */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}
2388
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2401
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2414
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2426
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN unconditionally.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2438
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2450
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets a fixed retry budget; decremented on failure */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have marked the port disabled; re-enable it */
	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev_reverse(dev, &ap->link) {
		/* restore the class only while retries remain */
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device survived probing */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev points at the device whose step failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2599
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing DISABLED re-enables probing/EH actions on this port */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2615
3be680b7
TH
2616/**
2617 * sata_print_link_status - Print SATA link status
936fd732 2618 * @link: SATA link to printk link status about
3be680b7
TH
2619 *
2620 * This function prints link speed and status of a SATA link.
2621 *
2622 * LOCKING:
2623 * None.
2624 */
6bdb4fc9 2625static void sata_print_link_status(struct ata_link *link)
3be680b7 2626{
6d5f9732 2627 u32 sstatus, scontrol, tmp;
3be680b7 2628
936fd732 2629 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2630 return;
936fd732 2631 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2632
936fd732 2633 if (ata_link_online(link)) {
3be680b7 2634 tmp = (sstatus >> 4) & 0xf;
936fd732 2635 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2636 "SATA link up %s (SStatus %X SControl %X)\n",
2637 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2638 } else {
936fd732 2639 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2640 "SATA link down (SStatus %X SControl %X)\n",
2641 sstatus, scontrol);
3be680b7
TH
2642 }
2643}
2644
ebdfca6e
AC
2645/**
2646 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2647 * @adev: device
2648 *
2649 * Obtain the other device on the same cable, or if none is
2650 * present NULL is returned
2651 */
2e9edbf8 2652
3373efd8 2653struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2654{
9af5c9c9
TH
2655 struct ata_link *link = adev->link;
2656 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2657 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2658 return NULL;
2659 return pair;
2660}
2661
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* forget both possible devices, then mark the port disabled */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2681
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	/* mask is a bitmap of allowed speeds; bit n => gen n+1 */
	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2740
936fd732 2741static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2742{
5270222f
TH
2743 struct ata_link *host_link = &link->ap->link;
2744 u32 limit, target, spd;
1c3fae4d 2745
5270222f
TH
2746 limit = link->sata_spd_limit;
2747
2748 /* Don't configure downstream link faster than upstream link.
2749 * It doesn't speed up anything and some PMPs choke on such
2750 * configuration.
2751 */
2752 if (!ata_is_host_link(link) && host_link->sata_spd)
2753 limit &= (1 << host_link->sata_spd) - 1;
2754
2755 if (limit == UINT_MAX)
2756 target = 0;
1c3fae4d 2757 else
5270222f 2758 target = fls(limit);
1c3fae4d
TH
2759
2760 spd = (*scontrol >> 4) & 0xf;
5270222f 2761 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2762
5270222f 2763 return spd != target;
1c3fae4d
TH
2764}
2765
2766/**
3c567b7d 2767 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2768 * @link: Link in question
1c3fae4d
TH
2769 *
2770 * Test whether the spd limit in SControl matches
936fd732 2771 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2772 * whether hardreset is necessary to apply SATA spd
2773 * configuration.
2774 *
2775 * LOCKING:
2776 * Inherited from caller.
2777 *
2778 * RETURNS:
2779 * 1 if SATA spd configuration is needed, 0 otherwise.
2780 */
1dc55e87 2781static int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2782{
2783 u32 scontrol;
2784
936fd732 2785 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2786 return 1;
1c3fae4d 2787
936fd732 2788 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2789}
2790
2791/**
3c567b7d 2792 * sata_set_spd - set SATA spd according to spd limit
936fd732 2793 * @link: Link to set SATA spd for
1c3fae4d 2794 *
936fd732 2795 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2796 *
2797 * LOCKING:
2798 * Inherited from caller.
2799 *
2800 * RETURNS:
2801 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2802 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2803 */
936fd732 2804int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2805{
2806 u32 scontrol;
81952c54 2807 int rc;
1c3fae4d 2808
936fd732 2809 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2810 return rc;
1c3fae4d 2811
936fd732 2812 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2813 return 0;
2814
936fd732 2815 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2816 return rc;
2817
1c3fae4d
TH
2818 return 1;
2819}
2820
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {
	/* mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma */
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },

	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },

	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },

	/* table terminator; ata_timing_find_mode() relies on 0xFF being
	 * larger than every valid XFER_* mode value */
	{ 0xFF }
};
2864
/* ENOUGH(v, unit): round v up to a whole number of 'unit's (v > 0) */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* EZ: like ENOUGH but maps 0 -> 0 (0 means "not specified") */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

/* Convert timing @t (nanoseconds) into clock counts in @q, given a bus
 * clock period of T ns (UT ns for the UDMA strobe).  Quantizing in
 * place (q == t) is allowed and is how callers use it.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2879
2880void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2881 struct ata_timing *m, unsigned int what)
2882{
2883 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2884 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2885 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2886 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2887 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2888 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2889 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2890 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2891}
2892
6357357c 2893const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 2894{
70cd071e
TH
2895 const struct ata_timing *t = ata_timing;
2896
2897 while (xfer_mode > t->mode)
2898 t++;
452503f9 2899
70cd071e
TH
2900 if (xfer_mode == t->mode)
2901 return t;
2902 return NULL;
452503f9
AC
2903}
2904
/* ata_timing_compute - compute quantized bus timings for @speed
 * @adev: target device (consulted for EIDE timing overrides)
 * @speed: XFER_* mode to compute timings for
 * @t: output timing, in bus clock counts
 * @T: bus clock period in ns
 * @UT: UDMA strobe period in ns
 *
 * Returns 0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the slower of table timing and drive-reported timing */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse once for the device's current PIO mode */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2975
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* map the transfer type to its slowest (base) mode */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk modes of this type from slowest to fastest, remembering
	 * the last one whose cycle is still >= the requested @cycle */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		last_mode = t->mode;
	}

	return last_mode;
}
3026
cf176e1a
TH
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a flag OR'd into the selector */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* knock off the highest (fastest) remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* prefer degrading UDMA before touching MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* limit UDMA to speeds safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also kills all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* must keep at least one PIO mode and actually change something */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3115
/* Issue SET FEATURES - XFER MODE for @dev's current xfer_mode, then
 * revalidate the device.  Device errors from SET_XFERMODE are ignored
 * for a number of known-broken or pre-ATA device classes (see the
 * quirk checks below).  Returns 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	/* track PIO vs DMA in device flags */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* non-device errors are always fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
					dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3187
1da177e4 3188/**
04351821 3189 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3190 * @link: link on which timings will be programmed
1967b7ff 3191 * @r_failed_dev: out parameter for failed device
1da177e4 3192 *
04351821
AC
3193 * Standard implementation of the function used to tune and set
3194 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3195 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3196 * returned in @r_failed_dev.
780a87f7 3197 *
1da177e4 3198 * LOCKING:
0cba632b 3199 * PCI/etc. bus probe sem.
e82cbdb9
TH
3200 *
3201 * RETURNS:
3202 * 0 on success, negative errno otherwise
1da177e4 3203 */
04351821 3204
0260731f 3205int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3206{
0260731f 3207 struct ata_port *ap = link->ap;
e8e0619f 3208 struct ata_device *dev;
f58229f8 3209 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3210
a6d5a51c 3211 /* step 1: calculate xfer_mask */
f58229f8 3212 ata_link_for_each_dev(dev, link) {
7dc951ae 3213 unsigned long pio_mask, dma_mask;
b3a70601 3214 unsigned int mode_mask;
a6d5a51c 3215
e1211e3f 3216 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3217 continue;
3218
b3a70601
AC
3219 mode_mask = ATA_DMA_MASK_ATA;
3220 if (dev->class == ATA_DEV_ATAPI)
3221 mode_mask = ATA_DMA_MASK_ATAPI;
3222 else if (ata_id_is_cfa(dev->id))
3223 mode_mask = ATA_DMA_MASK_CFA;
3224
3373efd8 3225 ata_dev_xfermask(dev);
33267325 3226 ata_force_xfermask(dev);
1da177e4 3227
acf356b1
TH
3228 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3229 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3230
3231 if (libata_dma_mask & mode_mask)
3232 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3233 else
3234 dma_mask = 0;
3235
acf356b1
TH
3236 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3237 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3238
4f65977d 3239 found = 1;
70cd071e 3240 if (dev->dma_mode != 0xff)
5444a6f4 3241 used_dma = 1;
a6d5a51c 3242 }
4f65977d 3243 if (!found)
e82cbdb9 3244 goto out;
a6d5a51c
TH
3245
3246 /* step 2: always set host PIO timings */
f58229f8 3247 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3248 if (!ata_dev_enabled(dev))
3249 continue;
3250
70cd071e 3251 if (dev->pio_mode == 0xff) {
f15a1daf 3252 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3253 rc = -EINVAL;
e82cbdb9 3254 goto out;
e8e0619f
TH
3255 }
3256
3257 dev->xfer_mode = dev->pio_mode;
3258 dev->xfer_shift = ATA_SHIFT_PIO;
3259 if (ap->ops->set_piomode)
3260 ap->ops->set_piomode(ap, dev);
3261 }
1da177e4 3262
a6d5a51c 3263 /* step 3: set host DMA timings */
f58229f8 3264 ata_link_for_each_dev(dev, link) {
70cd071e 3265 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3266 continue;
3267
3268 dev->xfer_mode = dev->dma_mode;
3269 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3270 if (ap->ops->set_dmamode)
3271 ap->ops->set_dmamode(ap, dev);
3272 }
1da177e4
LT
3273
3274 /* step 4: update devices' xfer mode */
f58229f8 3275 ata_link_for_each_dev(dev, link) {
18d90deb 3276 /* don't update suspended devices' xfer mode */
9666f400 3277 if (!ata_dev_enabled(dev))
83206a29
TH
3278 continue;
3279
3373efd8 3280 rc = ata_dev_set_mode(dev);
5bbc53f4 3281 if (rc)
e82cbdb9 3282 goto out;
83206a29 3283 }
1da177e4 3284
e8e0619f
TH
3285 /* Record simplex status. If we selected DMA then the other
3286 * host channels are not permitted to do so.
5444a6f4 3287 */
cca3974e 3288 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3289 ap->host->simplex_claimed = ap;
5444a6f4 3290
e82cbdb9
TH
3291 out:
3292 if (rc)
3293 *r_failed_dev = dev;
3294 return rc;
1da177e4
LT
3295}
3296
aa2731ad
TH
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	/* separate, shorter deadline for tolerating transient -ENODEV */
	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
	int warned = 0;

	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  For example,
		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
		 * GoVault needs even more than that.  Wait for
		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* whine once if the link takes suspiciously long */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_printk(link, KERN_WARNING,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		msleep(50);
	}
}
3371
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	/* give the device time to settle before polling readiness */
	msleep(ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3393
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the sooner of @deadline and the timeout */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus matters here */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 may be stuck; keep polling until deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3463
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* clear DET (resume phy), keep SPD, disable all power mgmt (IPM) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL here means no SCR access; that's fine for resume */
	return rc != -EINVAL ? rc : 0;
}
3506
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3550
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait itself after this
 *	function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET=4: disable phy while the speed limit is changed */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET=1: perform interface communication initialization (COMRESET) */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3663
57c9efdf
TH
/**
 *	sata_std_hardreset - COMRESET w/o waiting or classification
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	Standard SATA COMRESET w/o waiting or classification.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 if link offline, -EAGAIN if link online, -errno on errors.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	/* do hardreset; no @check_ready, so caller handles the waiting */
	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
	return online ? -EAGAIN : rc;
}
3689
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* reset complete, clear SError (write-1-to-clear register) */
	if (!sata_scr_read(link, SCR_ERROR, &serror))
		sata_scr_write(link, SCR_ERROR, serror);

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
3717
623a3128
TH
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.  Identity is established by matching class, model
 *	number and serial number.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	/* extract C-string model/serial from old and new IDENTIFY data */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}
3766
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.  On success, @dev->id is updated with the freshly
 *	read data.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* per-port scratch buffer; safe because EH is single-threaded */
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
3799
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed; a change means it's not the
	 * same device (or its HPA state changed unexpectedly)
	 */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3864
6919a0a6
AC
/* One quirk entry: model/revision patterns (trailing '*' wildcard
 * supported, see strn_pattern_cmp()) mapped to ATA_HORKAGE_* flags.
 * A NULL model_rev matches any firmware revision.
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_DISABLE },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	/* Maybe we should just blacklist TSSTcorp... */
	{ "TSSTcorp CDDVDW SH-S202H", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202H", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },

	/* End Marker */
	{ }
};
2e9edbf8 3953
/**
 *	strn_pattern_cmp - match a device string against a blacklist pattern
 *	@patt: pattern, optionally ending in @wildchar to match any suffix
 *	@name: device model/revision string to test
 *	@wildchar: wildcard character (libata passes '*')
 *
 *	Only a trailing wildcard is honoured; @wildchar anywhere else in
 *	@patt is compared literally.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strcmp/strncmp convention).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/* trailing wildcard: "PATT*" matches any name with prefix PATT */
	p = strchr(patt, wildchar);
	if (p && ((*(p + 1)) == 0))
		return strncmp(patt, name, p - patt);

	/* No wildcard: require an exact match.  Using strcmp() instead of
	 * strncmp(patt, name, strlen(name)) also rejects patterns which
	 * are strictly longer than the name; the old comparison stopped
	 * at the name's length and so falsely matched e.g. pattern
	 * "ST340823A1" against name "ST340823A".  (Empty name still
	 * matches only an empty pattern.)
	 */
	return strcmp(patt, name);
}
3976
/* Look up @dev in ata_device_blacklist and return the horkage flags of the
 * first matching entry, or 0 if the device is not blacklisted.  The model
 * number must match; when the entry also carries a firmware revision, that
 * must match too.  Patterns may end in '*' to match any suffix.
 */
static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
{
	unsigned char model_num[ATA_ID_PROD_LEN + 1];
	unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
	const struct ata_blacklist_entry *ad = ata_device_blacklist;

	/* extract NUL-terminated product and firmware strings from the
	 * raw IDENTIFY data */
	ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
	ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));

	/* table is terminated by an entry with a NULL model_num */
	while (ad->model_num) {
		if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
			if (ad->model_rev == NULL)
				return ad->horkage;
			if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
				return ad->horkage;
		}
		ad++;
	}
	return 0;
}
3997
6919a0a6
AC
3998static int ata_dma_blacklisted(const struct ata_device *dev)
3999{
4000 /* We don't support polling DMA.
4001 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4002 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4003 */
9af5c9c9 4004 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4005 (dev->flags & ATA_DFLAG_CDB_INTR))
4006 return 1;
75683fe7 4007 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4008}
4009
6bbfd53d
AC
4010/**
4011 * ata_is_40wire - check drive side detection
4012 * @dev: device
4013 *
4014 * Perform drive side detection decoding, allowing for device vendors
4015 * who can't follow the documentation.
4016 */
4017
4018static int ata_is_40wire(struct ata_device *dev)
4019{
4020 if (dev->horkage & ATA_HORKAGE_IVB)
4021 return ata_drive_40wire_relaxed(dev->id);
4022 return ata_drive_40wire(dev->id);
4023}
4024
/**
 *	cable_is_40wire	-	40/80/SATA decider
 *	@ap: port to consider
 *
 *	This function encapsulates the policy for speed management
 *	in one place. At the moment we don't cache the result but
 *	there is a good case for setting ap->cbl to the result when
 *	we are called with unknown cables (and figuring out if it
 *	impacts hotplug at all).
 *
 *	Return 1 if the cable appears to be 40 wire.
 */

static int cable_is_40wire(struct ata_port *ap)
{
	struct ata_link *link;
	struct ata_device *dev;

	/* If the controller thinks we are 40 wire, we are */
	if (ap->cbl == ATA_CBL_PATA40)
		return 1;
	/* If the controller thinks we are 80 wire, we are */
	if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
		return 0;
	/* If the system is known to be 40 wire short cable (eg laptop),
	   then we allow 80 wire modes even if the drive isn't sure */
	if (ap->cbl == ATA_CBL_PATA40_SHORT)
		return 0;
	/* If the controller doesn't know, fall back to drive-side detection
	   and scan every device on every link of the port.

	   - Note: We look for all 40 wire detects at this point.
	     Any 80 wire detect is taken to be 80 wire cable
	     because
	     - In many setups only the one drive (slave if present)
	       will give a valid detect
	     - If you have a non detect capable drive you don't
	       want it to colour the choice
	*/
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link) {
			/* one 80-wire-capable detect is enough to trust
			 * the cable for the whole port */
			if (!ata_is_40wire(dev))
				return 0;
		}
	}
	return 1;
}
4071
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, simplex restrictions, cable rules, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available: intersect with both the configured
	 * device masks and what the IDENTIFY data advertises */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	/* write the final result back into the per-device masks */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4149
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command;
	 * the sector count register carries the desired transfer mode */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	/* the feature register selects enable vs disable; the sector
	 * count register selects which SATA feature is affected */
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4230
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4273
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* only unmap if something was actually mapped */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);

	/* drop the mapping state so a later completion won't unmap twice */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4299
1da177e4 4300/**
5895ef9a 4301 * atapi_check_dma - Check whether ATAPI DMA can be supported
1da177e4
LT
4302 * @qc: Metadata associated with taskfile to check
4303 *
780a87f7
JG
4304 * Allow low-level driver to filter ATA PACKET commands, returning
4305 * a status indicating whether or not it is OK to use DMA for the
4306 * supplied PACKET command.
4307 *
1da177e4 4308 * LOCKING:
624d5c51
TH
4309 * spin_lock_irqsave(host lock)
4310 *
4311 * RETURNS: 0 when ATAPI DMA can be used
4312 * nonzero otherwise
4313 */
5895ef9a 4314int atapi_check_dma(struct ata_queued_cmd *qc)
624d5c51
TH
4315{
4316 struct ata_port *ap = qc->ap;
71601958 4317
624d5c51
TH
4318 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4319 * few ATAPI devices choke on such DMA requests.
4320 */
4321 if (unlikely(qc->nbytes & 15))
4322 return 1;
e2cec771 4323
624d5c51
TH
4324 if (ap->ops->check_atapi_dma)
4325 return ap->ops->check_atapi_dma(qc);
e2cec771 4326
624d5c51
TH
4327 return 0;
4328}
1da177e4 4329
624d5c51
TH
4330/**
4331 * ata_std_qc_defer - Check whether a qc needs to be deferred
4332 * @qc: ATA command in question
4333 *
4334 * Non-NCQ commands cannot run with any other command, NCQ or
4335 * not. As upper layer only knows the queue depth, we are
4336 * responsible for maintaining exclusion. This function checks
4337 * whether a new command @qc can be issued.
4338 *
4339 * LOCKING:
4340 * spin_lock_irqsave(host lock)
4341 *
4342 * RETURNS:
4343 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4344 */
4345int ata_std_qc_defer(struct ata_queued_cmd *qc)
4346{
4347 struct ata_link *link = qc->dev->link;
e2cec771 4348
624d5c51
TH
4349 if (qc->tf.protocol == ATA_PROT_NCQ) {
4350 if (!ata_tag_valid(link->active_tag))
4351 return 0;
4352 } else {
4353 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4354 return 0;
4355 }
e2cec771 4356
624d5c51
TH
4357 return ATA_DEFER_LINK;
4358}
6912ccd5 4359
/* no-op ->qc_prep for controllers that need no per-command preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
1da177e4 4361
/**
 *	ata_sg_init - Associate command with scatter-gather table.
 *	@qc: Command to be associated
 *	@sg: Scatter-gather table.
 *	@n_elem: Number of elements in s/g table.
 *
 *	Initialize the data-related elements of queued_cmd @qc
 *	to point to a scatter-gather table @sg, containing @n_elem
 *	elements.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
		 unsigned int n_elem)
{
	qc->sg = sg;
	qc->n_elem = n_elem;
	/* cursg starts at the first element; presumably advanced by the
	 * data transfer code as the command progresses — confirm against
	 * the PIO/HSM path */
	qc->cursg = qc->sg;
}
bb5cb290 4382
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int n_elem;

	VPRINTK("ENTER, ata%u\n", ap->print_id);

	n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
	/* dma_map_sg() returns 0 on failure (n_elem is unsigned, so
	 * "< 1" is effectively "== 0") */
	if (n_elem < 1)
		return -1;

	DPRINTK("%d sg elements mapped\n", n_elem);

	/* mapping may coalesce entries; record the mapped count and mark
	 * the qc so completion knows to unmap */
	qc->n_elem = n_elem;
	qc->flags |= ATA_QCFLAG_DMAMAP;

	return 0;
}
4414
624d5c51
TH
4415/**
4416 * swap_buf_le16 - swap halves of 16-bit words in place
4417 * @buf: Buffer to swap
4418 * @buf_words: Number of 16-bit words in buffer.
4419 *
4420 * Swap halves of 16-bit words if needed to convert from
4421 * little-endian byte order to native cpu byte order, or
4422 * vice-versa.
4423 *
4424 * LOCKING:
4425 * Inherited from caller.
4426 */
4427void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 4428{
624d5c51
TH
4429#ifdef __BIG_ENDIAN
4430 unsigned int i;
8061f5f0 4431
624d5c51
TH
4432 for (i = 0; i < buf_words; i++)
4433 buf[i] = le16_to_cpu(buf[i]);
4434#endif /* __BIG_ENDIAN */
8061f5f0
TH
4435}
4436
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *	@dev: Device from whom we request an available command structure
 *
 *	LOCKING:
 *	None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		/* test_and_set_bit() atomically claims the tag */
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	/* returns NULL if every non-internal tag was taken */
	if (qc)
		qc->tag = i;

	return qc;
}
4467
4468/**
4469 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4470 * @dev: Device from whom we request an available command structure
4471 *
4472 * LOCKING:
0cba632b 4473 * None.
1da177e4
LT
4474 */
4475
3373efd8 4476struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4477{
9af5c9c9 4478 struct ata_port *ap = dev->link->ap;
1da177e4
LT
4479 struct ata_queued_cmd *qc;
4480
4481 qc = ata_qc_new(ap);
4482 if (qc) {
1da177e4
LT
4483 qc->scsicmd = NULL;
4484 qc->ap = ap;
4485 qc->dev = dev;
1da177e4 4486
2c13b7ce 4487 ata_qc_reinit(qc);
1da177e4
LT
4488 }
4489
4490 return qc;
4491}
4492
/**
 *	ata_qc_free - free unused ata_queued_cmd
 *	@qc: Command to complete
 *
 *	Designed to free unused ata_queued_cmd object
 *	in case something prevents using it.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_free(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	unsigned int tag;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */

	qc->flags = 0;
	tag = qc->tag;
	if (likely(ata_tag_valid(tag))) {
		/* poison the tag before releasing the allocation bit so
		 * the qc can no longer be reached via its old tag */
		qc->tag = ATA_TAG_POISON;
		clear_bit(tag, &ap->qc_allocated);
	}
}
4517
/* Low-level qc completion: unmap DMA, retire the tag from the link/port
 * bookkeeping and invoke the completion callback.  Callers (ata_qc_complete
 * and EH) are responsible for result-TF handling and failure routing.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* link counts as active while any NCQ tag is outstanding */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4554
39599a53
TH
4555static void fill_result_tf(struct ata_queued_cmd *qc)
4556{
4557 struct ata_port *ap = qc->ap;
4558
39599a53 4559 qc->result_tf.flags = qc->tf.flags;
22183bf5 4560 ap->ops->qc_fill_rtf(qc);
39599a53
TH
4561}
4562
00115e0f
TH
4563static void ata_verify_xfer(struct ata_queued_cmd *qc)
4564{
4565 struct ata_device *dev = qc->dev;
4566
4567 if (ata_tag_internal(qc->tag))
4568 return;
4569
4570 if (ata_is_nodata(qc->tf.protocol))
4571 return;
4572
4573 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4574 return;
4575
4576 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4577}
4578
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new EH path */
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are handled by their issuer,
			 * not scheduled through EH */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH path */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4665
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *
 *	Complete in-flight commands.  This functions is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits set in the old mask but clear in the new one are done */
	done_mask = ap->qc_active ^ qc_active;

	/* a bit set in @qc_active but clear in ap->qc_active would mean a
	 * command appeared behind our back - reject the transition */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		/* ata_qc_from_tag() returns NULL for failed/absent qcs */
		if ((qc = ata_qc_from_tag(ap, i))) {
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
4710
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* record the command in the link/port active bookkeeping */
	if (ata_is_ncq(prot)) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

	/* DMA commands always need mapping; PIO only when the controller
	 * does PIO via DMA (ATA_FLAG_PIO_DMA) */
	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sg_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* failed before reaching the hardware - complete immediately */
	ata_qc_complete(qc);
}
4781
34bf2170
TH
4782/**
4783 * sata_scr_valid - test whether SCRs are accessible
936fd732 4784 * @link: ATA link to test SCR accessibility for
34bf2170 4785 *
936fd732 4786 * Test whether SCRs are accessible for @link.
34bf2170
TH
4787 *
4788 * LOCKING:
4789 * None.
4790 *
4791 * RETURNS:
4792 * 1 if SCRs are accessible, 0 otherwise.
4793 */
936fd732 4794int sata_scr_valid(struct ata_link *link)
34bf2170 4795{
936fd732
TH
4796 struct ata_port *ap = link->ap;
4797
a16abc0b 4798 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
4799}
4800
4801/**
4802 * sata_scr_read - read SCR register of the specified port
936fd732 4803 * @link: ATA link to read SCR for
34bf2170
TH
4804 * @reg: SCR to read
4805 * @val: Place to store read value
4806 *
936fd732 4807 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
4808 * guaranteed to succeed if @link is ap->link, the cable type of
4809 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
4810 *
4811 * LOCKING:
633273a3 4812 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
4813 *
4814 * RETURNS:
4815 * 0 on success, negative errno on failure.
4816 */
936fd732 4817int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 4818{
633273a3
TH
4819 if (ata_is_host_link(link)) {
4820 struct ata_port *ap = link->ap;
936fd732 4821
633273a3
TH
4822 if (sata_scr_valid(link))
4823 return ap->ops->scr_read(ap, reg, val);
4824 return -EOPNOTSUPP;
4825 }
4826
4827 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
4828}
4829
4830/**
4831 * sata_scr_write - write SCR register of the specified port
936fd732 4832 * @link: ATA link to write SCR for
34bf2170
TH
4833 * @reg: SCR to write
4834 * @val: value to write
4835 *
936fd732 4836 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
4837 * guaranteed to succeed if @link is ap->link, the cable type of
4838 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
4839 *
4840 * LOCKING:
633273a3 4841 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
4842 *
4843 * RETURNS:
4844 * 0 on success, negative errno on failure.
4845 */
936fd732 4846int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 4847{
633273a3
TH
4848 if (ata_is_host_link(link)) {
4849 struct ata_port *ap = link->ap;
4850
4851 if (sata_scr_valid(link))
4852 return ap->ops->scr_write(ap, reg, val);
4853 return -EOPNOTSUPP;
4854 }
936fd732 4855
633273a3 4856 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
4857}
4858
4859/**
4860 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 4861 * @link: ATA link to write SCR for
34bf2170
TH
4862 * @reg: SCR to write
4863 * @val: value to write
4864 *
4865 * This function is identical to sata_scr_write() except that this
4866 * function performs flush after writing to the register.
4867 *
4868 * LOCKING:
633273a3 4869 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
4870 *
4871 * RETURNS:
4872 * 0 on success, negative errno on failure.
4873 */
936fd732 4874int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 4875{
633273a3
TH
4876 if (ata_is_host_link(link)) {
4877 struct ata_port *ap = link->ap;
4878 int rc;
da3dbb17 4879
633273a3
TH
4880 if (sata_scr_valid(link)) {
4881 rc = ap->ops->scr_write(ap, reg, val);
4882 if (rc == 0)
4883 rc = ap->ops->scr_read(ap, reg, &val);
4884 return rc;
4885 }
4886 return -EOPNOTSUPP;
34bf2170 4887 }
633273a3
TH
4888
4889 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
4890}
4891
4892/**
936fd732
TH
4893 * ata_link_online - test whether the given link is online
4894 * @link: ATA link to test
34bf2170 4895 *
936fd732
TH
4896 * Test whether @link is online. Note that this function returns
4897 * 0 if online status of @link cannot be obtained, so
4898 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4899 *
4900 * LOCKING:
4901 * None.
4902 *
4903 * RETURNS:
4904 * 1 if the port online status is available and online.
4905 */
936fd732 4906int ata_link_online(struct ata_link *link)
34bf2170
TH
4907{
4908 u32 sstatus;
4909
936fd732
TH
4910 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4911 (sstatus & 0xf) == 0x3)
34bf2170
TH
4912 return 1;
4913 return 0;
4914}
4915
4916/**
936fd732
TH
4917 * ata_link_offline - test whether the given link is offline
4918 * @link: ATA link to test
34bf2170 4919 *
936fd732
TH
4920 * Test whether @link is offline. Note that this function
4921 * returns 0 if offline status of @link cannot be obtained, so
4922 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4923 *
4924 * LOCKING:
4925 * None.
4926 *
4927 * RETURNS:
4928 * 1 if the port offline status is available and offline.
4929 */
936fd732 4930int ata_link_offline(struct ata_link *link)
34bf2170
TH
4931{
4932 u32 sstatus;
4933
936fd732
TH
4934 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4935 (sstatus & 0xf) != 0x3)
34bf2170
TH
4936 return 1;
4937 return 0;
4938}
0baab86b 4939
6ffa01d8 4940#ifdef CONFIG_PM
cca3974e
JG
4941static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
4942 unsigned int action, unsigned int ehi_flags,
4943 int wait)
500530f6
TH
4944{
4945 unsigned long flags;
4946 int i, rc;
4947
cca3974e
JG
4948 for (i = 0; i < host->n_ports; i++) {
4949 struct ata_port *ap = host->ports[i];
e3667ebf 4950 struct ata_link *link;
500530f6
TH
4951
4952 /* Previous resume operation might still be in
4953 * progress. Wait for PM_PENDING to clear.
4954 */
4955 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
4956 ata_port_wait_eh(ap);
4957 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4958 }
4959
4960 /* request PM ops to EH */
4961 spin_lock_irqsave(ap->lock, flags);
4962
4963 ap->pm_mesg = mesg;
4964 if (wait) {
4965 rc = 0;
4966 ap->pm_result = &rc;
4967 }
4968
4969 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
4970 __ata_port_for_each_link(link, ap) {
4971 link->eh_info.action |= action;
4972 link->eh_info.flags |= ehi_flags;
4973 }
500530f6
TH
4974
4975 ata_port_schedule_eh(ap);
4976
4977 spin_unlock_irqrestore(ap->lock, flags);
4978
4979 /* wait and check result */
4980 if (wait) {
4981 ata_port_wait_eh(ap);
4982 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
4983 if (rc)
4984 return rc;
4985 }
4986 }
4987
4988 return 0;
4989}
4990
4991/**
cca3974e
JG
4992 * ata_host_suspend - suspend host
4993 * @host: host to suspend
500530f6
TH
4994 * @mesg: PM message
4995 *
cca3974e 4996 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
4997 * function requests EH to perform PM operations and waits for EH
4998 * to finish.
4999 *
5000 * LOCKING:
5001 * Kernel thread context (may sleep).
5002 *
5003 * RETURNS:
5004 * 0 on success, -errno on failure.
5005 */
cca3974e 5006int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 5007{
9666f400 5008 int rc;
500530f6 5009
ca77329f
KCA
5010 /*
5011 * disable link pm on all ports before requesting
5012 * any pm activity
5013 */
5014 ata_lpm_enable(host);
5015
cca3974e 5016 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
72ad6ec4
JG
5017 if (rc == 0)
5018 host->dev->power.power_state = mesg;
500530f6
TH
5019 return rc;
5020}
5021
5022/**
cca3974e
JG
5023 * ata_host_resume - resume host
5024 * @host: host to resume
500530f6 5025 *
cca3974e 5026 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
5027 * function requests EH to perform PM operations and returns.
5028 * Note that all resume operations are performed parallely.
5029 *
5030 * LOCKING:
5031 * Kernel thread context (may sleep).
5032 */
cca3974e 5033void ata_host_resume(struct ata_host *host)
500530f6 5034{
cf480626 5035 ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
cca3974e 5036 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
72ad6ec4 5037 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
5038
5039 /* reenable link pm */
5040 ata_lpm_disable(host);
500530f6 5041}
6ffa01d8 5042#endif
500530f6 5043
c893a3ae
RD
5044/**
5045 * ata_port_start - Set port up for dma.
5046 * @ap: Port to initialize
5047 *
5048 * Called just after data structures for each port are
5049 * initialized. Allocates space for PRD table.
5050 *
5051 * May be used as the port_start() entry in ata_port_operations.
5052 *
5053 * LOCKING:
5054 * Inherited from caller.
5055 */
f0d36efd 5056int ata_port_start(struct ata_port *ap)
1da177e4 5057{
2f1f610b 5058 struct device *dev = ap->dev;
1da177e4 5059
f0d36efd
TH
5060 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
5061 GFP_KERNEL);
1da177e4
LT
5062 if (!ap->prd)
5063 return -ENOMEM;
5064
1da177e4
LT
5065 return 0;
5066}
5067
3ef3b43d
TH
5068/**
5069 * ata_dev_init - Initialize an ata_device structure
5070 * @dev: Device structure to initialize
5071 *
5072 * Initialize @dev in preparation for probing.
5073 *
5074 * LOCKING:
5075 * Inherited from caller.
5076 */
5077void ata_dev_init(struct ata_device *dev)
5078{
9af5c9c9
TH
5079 struct ata_link *link = dev->link;
5080 struct ata_port *ap = link->ap;
72fa4b74
TH
5081 unsigned long flags;
5082
5a04bf4b 5083 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
5084 link->sata_spd_limit = link->hw_sata_spd_limit;
5085 link->sata_spd = 0;
5a04bf4b 5086
72fa4b74
TH
5087 /* High bits of dev->flags are used to record warm plug
5088 * requests which occur asynchronously. Synchronize using
cca3974e 5089 * host lock.
72fa4b74 5090 */
ba6a1308 5091 spin_lock_irqsave(ap->lock, flags);
72fa4b74 5092 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 5093 dev->horkage = 0;
ba6a1308 5094 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 5095
72fa4b74
TH
5096 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
5097 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
5098 dev->pio_mask = UINT_MAX;
5099 dev->mwdma_mask = UINT_MAX;
5100 dev->udma_mask = UINT_MAX;
5101}
5102
4fb37a25
TH
5103/**
5104 * ata_link_init - Initialize an ata_link structure
5105 * @ap: ATA port link is attached to
5106 * @link: Link structure to initialize
8989805d 5107 * @pmp: Port multiplier port number
4fb37a25
TH
5108 *
5109 * Initialize @link.
5110 *
5111 * LOCKING:
5112 * Kernel thread context (may sleep)
5113 */
fb7fd614 5114void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
5115{
5116 int i;
5117
5118 /* clear everything except for devices */
5119 memset(link, 0, offsetof(struct ata_link, device[0]));
5120
5121 link->ap = ap;
8989805d 5122 link->pmp = pmp;
4fb37a25
TH
5123 link->active_tag = ATA_TAG_POISON;
5124 link->hw_sata_spd_limit = UINT_MAX;
5125
5126 /* can't use iterator, ap isn't initialized yet */
5127 for (i = 0; i < ATA_MAX_DEVICES; i++) {
5128 struct ata_device *dev = &link->device[i];
5129
5130 dev->link = link;
5131 dev->devno = dev - link->device;
5132 ata_dev_init(dev);
5133 }
5134}
5135
5136/**
5137 * sata_link_init_spd - Initialize link->sata_spd_limit
5138 * @link: Link to configure sata_spd_limit for
5139 *
5140 * Initialize @link->[hw_]sata_spd_limit to the currently
5141 * configured value.
5142 *
5143 * LOCKING:
5144 * Kernel thread context (may sleep).
5145 *
5146 * RETURNS:
5147 * 0 on success, -errno on failure.
5148 */
fb7fd614 5149int sata_link_init_spd(struct ata_link *link)
4fb37a25 5150{
33267325
TH
5151 u32 scontrol;
5152 u8 spd;
4fb37a25
TH
5153 int rc;
5154
5155 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
5156 if (rc)
5157 return rc;
5158
5159 spd = (scontrol >> 4) & 0xf;
5160 if (spd)
5161 link->hw_sata_spd_limit &= (1 << spd) - 1;
5162
33267325
TH
5163 ata_force_spd_limit(link);
5164
4fb37a25
TH
5165 link->sata_spd_limit = link->hw_sata_spd_limit;
5166
5167 return 0;
5168}
5169
1da177e4 5170/**
f3187195
TH
5171 * ata_port_alloc - allocate and initialize basic ATA port resources
5172 * @host: ATA host this allocated port belongs to
1da177e4 5173 *
f3187195
TH
5174 * Allocate and initialize basic ATA port resources.
5175 *
5176 * RETURNS:
5177 * Allocate ATA port on success, NULL on failure.
0cba632b 5178 *
1da177e4 5179 * LOCKING:
f3187195 5180 * Inherited from calling layer (may sleep).
1da177e4 5181 */
f3187195 5182struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 5183{
f3187195 5184 struct ata_port *ap;
1da177e4 5185
f3187195
TH
5186 DPRINTK("ENTER\n");
5187
5188 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
5189 if (!ap)
5190 return NULL;
5191
f4d6d004 5192 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 5193 ap->lock = &host->lock;
198e0fed 5194 ap->flags = ATA_FLAG_DISABLED;
f3187195 5195 ap->print_id = -1;
1da177e4 5196 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 5197 ap->host = host;
f3187195 5198 ap->dev = host->dev;
1da177e4 5199 ap->last_ctl = 0xFF;
bd5d825c
BP
5200
5201#if defined(ATA_VERBOSE_DEBUG)
5202 /* turn on all debugging levels */
5203 ap->msg_enable = 0x00FF;
5204#elif defined(ATA_DEBUG)
5205 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 5206#else
0dd4b21f 5207 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 5208#endif
1da177e4 5209
127102ae 5210#ifdef CONFIG_ATA_SFF
442eacc3 5211 INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
127102ae 5212#endif
65f27f38
DH
5213 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
5214 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 5215 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 5216 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
5217 init_timer_deferrable(&ap->fastdrain_timer);
5218 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
5219 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 5220
838df628 5221 ap->cbl = ATA_CBL_NONE;
838df628 5222
8989805d 5223 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
5224
5225#ifdef ATA_IRQ_TRAP
5226 ap->stats.unhandled_irq = 1;
5227 ap->stats.idle_irq = 1;
5228#endif
1da177e4 5229 return ap;
1da177e4
LT
5230}
5231
f0d36efd
TH
5232static void ata_host_release(struct device *gendev, void *res)
5233{
5234 struct ata_host *host = dev_get_drvdata(gendev);
5235 int i;
5236
1aa506e4
TH
5237 for (i = 0; i < host->n_ports; i++) {
5238 struct ata_port *ap = host->ports[i];
5239
4911487a
TH
5240 if (!ap)
5241 continue;
5242
5243 if (ap->scsi_host)
1aa506e4
TH
5244 scsi_host_put(ap->scsi_host);
5245
633273a3 5246 kfree(ap->pmp_link);
4911487a 5247 kfree(ap);
1aa506e4
TH
5248 host->ports[i] = NULL;
5249 }
5250
1aa56cca 5251 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
5252}
5253
f3187195
TH
5254/**
5255 * ata_host_alloc - allocate and init basic ATA host resources
5256 * @dev: generic device this host is associated with
5257 * @max_ports: maximum number of ATA ports associated with this host
5258 *
5259 * Allocate and initialize basic ATA host resources. LLD calls
5260 * this function to allocate a host, initializes it fully and
5261 * attaches it using ata_host_register().
5262 *
5263 * @max_ports ports are allocated and host->n_ports is
5264 * initialized to @max_ports. The caller is allowed to decrease
5265 * host->n_ports before calling ata_host_register(). The unused
5266 * ports will be automatically freed on registration.
5267 *
5268 * RETURNS:
5269 * Allocate ATA host on success, NULL on failure.
5270 *
5271 * LOCKING:
5272 * Inherited from calling layer (may sleep).
5273 */
5274struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
5275{
5276 struct ata_host *host;
5277 size_t sz;
5278 int i;
5279
5280 DPRINTK("ENTER\n");
5281
5282 if (!devres_open_group(dev, NULL, GFP_KERNEL))
5283 return NULL;
5284
5285 /* alloc a container for our list of ATA ports (buses) */
5286 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
5287 /* alloc a container for our list of ATA ports (buses) */
5288 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
5289 if (!host)
5290 goto err_out;
5291
5292 devres_add(dev, host);
5293 dev_set_drvdata(dev, host);
5294
5295 spin_lock_init(&host->lock);
5296 host->dev = dev;
5297 host->n_ports = max_ports;
5298
5299 /* allocate ports bound to this host */
5300 for (i = 0; i < max_ports; i++) {
5301 struct ata_port *ap;
5302
5303 ap = ata_port_alloc(host);
5304 if (!ap)
5305 goto err_out;
5306
5307 ap->port_no = i;
5308 host->ports[i] = ap;
5309 }
5310
5311 devres_remove_group(dev, NULL);
5312 return host;
5313
5314 err_out:
5315 devres_release_group(dev, NULL);
5316 return NULL;
5317}
5318
f5cda257
TH
5319/**
5320 * ata_host_alloc_pinfo - alloc host and init with port_info array
5321 * @dev: generic device this host is associated with
5322 * @ppi: array of ATA port_info to initialize host with
5323 * @n_ports: number of ATA ports attached to this host
5324 *
5325 * Allocate ATA host and initialize with info from @ppi. If NULL
5326 * terminated, @ppi may contain fewer entries than @n_ports. The
5327 * last entry will be used for the remaining ports.
5328 *
5329 * RETURNS:
5330 * Allocate ATA host on success, NULL on failure.
5331 *
5332 * LOCKING:
5333 * Inherited from calling layer (may sleep).
5334 */
5335struct ata_host *ata_host_alloc_pinfo(struct device *dev,
5336 const struct ata_port_info * const * ppi,
5337 int n_ports)
5338{
5339 const struct ata_port_info *pi;
5340 struct ata_host *host;
5341 int i, j;
5342
5343 host = ata_host_alloc(dev, n_ports);
5344 if (!host)
5345 return NULL;
5346
5347 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
5348 struct ata_port *ap = host->ports[i];
5349
5350 if (ppi[j])
5351 pi = ppi[j++];
5352
5353 ap->pio_mask = pi->pio_mask;
5354 ap->mwdma_mask = pi->mwdma_mask;
5355 ap->udma_mask = pi->udma_mask;
5356 ap->flags |= pi->flags;
0c88758b 5357 ap->link.flags |= pi->link_flags;
f5cda257
TH
5358 ap->ops = pi->port_ops;
5359
5360 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
5361 host->ops = pi->port_ops;
f5cda257
TH
5362 }
5363
5364 return host;
5365}
5366
32ebbc0c
TH
5367static void ata_host_stop(struct device *gendev, void *res)
5368{
5369 struct ata_host *host = dev_get_drvdata(gendev);
5370 int i;
5371
5372 WARN_ON(!(host->flags & ATA_HOST_STARTED));
5373
5374 for (i = 0; i < host->n_ports; i++) {
5375 struct ata_port *ap = host->ports[i];
5376
5377 if (ap->ops->port_stop)
5378 ap->ops->port_stop(ap);
5379 }
5380
5381 if (host->ops->host_stop)
5382 host->ops->host_stop(host);
5383}
5384
029cfd6b
TH
5385/**
5386 * ata_finalize_port_ops - finalize ata_port_operations
5387 * @ops: ata_port_operations to finalize
5388 *
5389 * An ata_port_operations can inherit from another ops and that
5390 * ops can again inherit from another. This can go on as many
5391 * times as necessary as long as there is no loop in the
5392 * inheritance chain.
5393 *
5394 * Ops tables are finalized when the host is started. NULL or
5395 * unspecified entries are inherited from the closet ancestor
5396 * which has the method and the entry is populated with it.
5397 * After finalization, the ops table directly points to all the
5398 * methods and ->inherits is no longer necessary and cleared.
5399 *
5400 * Using ATA_OP_NULL, inheriting ops can force a method to NULL.
5401 *
5402 * LOCKING:
5403 * None.
5404 */
5405static void ata_finalize_port_ops(struct ata_port_operations *ops)
5406{
2da67659 5407 static DEFINE_SPINLOCK(lock);
029cfd6b
TH
5408 const struct ata_port_operations *cur;
5409 void **begin = (void **)ops;
5410 void **end = (void **)&ops->inherits;
5411 void **pp;
5412
5413 if (!ops || !ops->inherits)
5414 return;
5415
5416 spin_lock(&lock);
5417
5418 for (cur = ops->inherits; cur; cur = cur->inherits) {
5419 void **inherit = (void **)cur;
5420
5421 for (pp = begin; pp < end; pp++, inherit++)
5422 if (!*pp)
5423 *pp = *inherit;
5424 }
5425
5426 for (pp = begin; pp < end; pp++)
5427 if (IS_ERR(*pp))
5428 *pp = NULL;
5429
5430 ops->inherits = NULL;
5431
5432 spin_unlock(&lock);
5433}
5434
ecef7253
TH
5435/**
5436 * ata_host_start - start and freeze ports of an ATA host
5437 * @host: ATA host to start ports for
5438 *
5439 * Start and then freeze ports of @host. Started status is
5440 * recorded in host->flags, so this function can be called
5441 * multiple times. Ports are guaranteed to get started only
f3187195
TH
5442 * once. If host->ops isn't initialized yet, its set to the
5443 * first non-dummy port ops.
ecef7253
TH
5444 *
5445 * LOCKING:
5446 * Inherited from calling layer (may sleep).
5447 *
5448 * RETURNS:
5449 * 0 if all ports are started successfully, -errno otherwise.
5450 */
5451int ata_host_start(struct ata_host *host)
5452{
32ebbc0c
TH
5453 int have_stop = 0;
5454 void *start_dr = NULL;
ecef7253
TH
5455 int i, rc;
5456
5457 if (host->flags & ATA_HOST_STARTED)
5458 return 0;
5459
029cfd6b
TH
5460 ata_finalize_port_ops(host->ops);
5461
ecef7253
TH
5462 for (i = 0; i < host->n_ports; i++) {
5463 struct ata_port *ap = host->ports[i];
5464
029cfd6b
TH
5465 ata_finalize_port_ops(ap->ops);
5466
f3187195
TH
5467 if (!host->ops && !ata_port_is_dummy(ap))
5468 host->ops = ap->ops;
5469
32ebbc0c
TH
5470 if (ap->ops->port_stop)
5471 have_stop = 1;
5472 }
5473
5474 if (host->ops->host_stop)
5475 have_stop = 1;
5476
5477 if (have_stop) {
5478 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
5479 if (!start_dr)
5480 return -ENOMEM;
5481 }
5482
5483 for (i = 0; i < host->n_ports; i++) {
5484 struct ata_port *ap = host->ports[i];
5485
ecef7253
TH
5486 if (ap->ops->port_start) {
5487 rc = ap->ops->port_start(ap);
5488 if (rc) {
0f9fe9b7 5489 if (rc != -ENODEV)
0f757743
AM
5490 dev_printk(KERN_ERR, host->dev,
5491 "failed to start port %d "
5492 "(errno=%d)\n", i, rc);
ecef7253
TH
5493 goto err_out;
5494 }
5495 }
ecef7253
TH
5496 ata_eh_freeze_port(ap);
5497 }
5498
32ebbc0c
TH
5499 if (start_dr)
5500 devres_add(host->dev, start_dr);
ecef7253
TH
5501 host->flags |= ATA_HOST_STARTED;
5502 return 0;
5503
5504 err_out:
5505 while (--i >= 0) {
5506 struct ata_port *ap = host->ports[i];
5507
5508 if (ap->ops->port_stop)
5509 ap->ops->port_stop(ap);
5510 }
32ebbc0c 5511 devres_free(start_dr);
ecef7253
TH
5512 return rc;
5513}
5514
b03732f0 5515/**
cca3974e
JG
5516 * ata_sas_host_init - Initialize a host struct
5517 * @host: host to initialize
5518 * @dev: device host is attached to
5519 * @flags: host flags
5520 * @ops: port_ops
b03732f0
BK
5521 *
5522 * LOCKING:
5523 * PCI/etc. bus probe sem.
5524 *
5525 */
f3187195 5526/* KILLME - the only user left is ipr */
cca3974e 5527void ata_host_init(struct ata_host *host, struct device *dev,
029cfd6b 5528 unsigned long flags, struct ata_port_operations *ops)
b03732f0 5529{
cca3974e
JG
5530 spin_lock_init(&host->lock);
5531 host->dev = dev;
5532 host->flags = flags;
5533 host->ops = ops;
b03732f0
BK
5534}
5535
f3187195
TH
5536/**
5537 * ata_host_register - register initialized ATA host
5538 * @host: ATA host to register
5539 * @sht: template for SCSI host
5540 *
5541 * Register initialized ATA host. @host is allocated using
5542 * ata_host_alloc() and fully initialized by LLD. This function
5543 * starts ports, registers @host with ATA and SCSI layers and
5544 * probe registered devices.
5545 *
5546 * LOCKING:
5547 * Inherited from calling layer (may sleep).
5548 *
5549 * RETURNS:
5550 * 0 on success, -errno otherwise.
5551 */
5552int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
5553{
5554 int i, rc;
5555
5556 /* host must have been started */
5557 if (!(host->flags & ATA_HOST_STARTED)) {
5558 dev_printk(KERN_ERR, host->dev,
5559 "BUG: trying to register unstarted host\n");
5560 WARN_ON(1);
5561 return -EINVAL;
5562 }
5563
5564 /* Blow away unused ports. This happens when LLD can't
5565 * determine the exact number of ports to allocate at
5566 * allocation time.
5567 */
5568 for (i = host->n_ports; host->ports[i]; i++)
5569 kfree(host->ports[i]);
5570
5571 /* give ports names and add SCSI hosts */
5572 for (i = 0; i < host->n_ports; i++)
5573 host->ports[i]->print_id = ata_print_id++;
5574
5575 rc = ata_scsi_add_hosts(host, sht);
5576 if (rc)
5577 return rc;
5578
fafbae87
TH
5579 /* associate with ACPI nodes */
5580 ata_acpi_associate(host);
5581
f3187195
TH
5582 /* set cable, sata_spd_limit and report */
5583 for (i = 0; i < host->n_ports; i++) {
5584 struct ata_port *ap = host->ports[i];
f3187195
TH
5585 unsigned long xfer_mask;
5586
5587 /* set SATA cable type if still unset */
5588 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
5589 ap->cbl = ATA_CBL_SATA;
5590
5591 /* init sata_spd_limit to the current value */
4fb37a25 5592 sata_link_init_spd(&ap->link);
f3187195 5593
cbcdd875 5594 /* print per-port info to dmesg */
f3187195
TH
5595 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
5596 ap->udma_mask);
5597
abf6e8ed 5598 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
5599 ata_port_printk(ap, KERN_INFO,
5600 "%cATA max %s %s\n",
a16abc0b 5601 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 5602 ata_mode_string(xfer_mask),
cbcdd875 5603 ap->link.eh_info.desc);
abf6e8ed
TH
5604 ata_ehi_clear_desc(&ap->link.eh_info);
5605 } else
f3187195
TH
5606 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
5607 }
5608
5609 /* perform each probe synchronously */
5610 DPRINTK("probe begin\n");
5611 for (i = 0; i < host->n_ports; i++) {
5612 struct ata_port *ap = host->ports[i];
f3187195
TH
5613
5614 /* probe */
5615 if (ap->ops->error_handler) {
9af5c9c9 5616 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
5617 unsigned long flags;
5618
5619 ata_port_probe(ap);
5620
5621 /* kick EH for boot probing */
5622 spin_lock_irqsave(ap->lock, flags);
5623
b558eddd 5624 ehi->probe_mask |= ATA_ALL_DEVICES;
391191c1 5625 ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
f3187195
TH
5626 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
5627
f4d6d004 5628 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
5629 ap->pflags |= ATA_PFLAG_LOADING;
5630 ata_port_schedule_eh(ap);
5631
5632 spin_unlock_irqrestore(ap->lock, flags);
5633
5634 /* wait for EH to finish */
5635 ata_port_wait_eh(ap);
5636 } else {
5637 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
5638 rc = ata_bus_probe(ap);
5639 DPRINTK("ata%u: bus probe end\n", ap->print_id);
5640
5641 if (rc) {
5642 /* FIXME: do something useful here?
5643 * Current libata behavior will
5644 * tear down everything when
5645 * the module is removed
5646 * or the h/w is unplugged.
5647 */
5648 }
5649 }
5650 }
5651
5652 /* probes are done, now scan each port's disk(s) */
5653 DPRINTK("host probe begin\n");
5654 for (i = 0; i < host->n_ports; i++) {
5655 struct ata_port *ap = host->ports[i];
5656
1ae46317 5657 ata_scsi_scan_host(ap, 1);
f3187195
TH
5658 }
5659
5660 return 0;
5661}
5662
f5cda257
TH
5663/**
5664 * ata_host_activate - start host, request IRQ and register it
5665 * @host: target ATA host
5666 * @irq: IRQ to request
5667 * @irq_handler: irq_handler used when requesting IRQ
5668 * @irq_flags: irq_flags used when requesting IRQ
5669 * @sht: scsi_host_template to use when registering the host
5670 *
5671 * After allocating an ATA host and initializing it, most libata
5672 * LLDs perform three steps to activate the host - start host,
5673 * request IRQ and register it. This helper takes necessasry
5674 * arguments and performs the three steps in one go.
5675 *
3d46b2e2
PM
5676 * An invalid IRQ skips the IRQ registration and expects the host to
5677 * have set polling mode on the port. In this case, @irq_handler
5678 * should be NULL.
5679 *
f5cda257
TH
5680 * LOCKING:
5681 * Inherited from calling layer (may sleep).
5682 *
5683 * RETURNS:
5684 * 0 on success, -errno otherwise.
5685 */
5686int ata_host_activate(struct ata_host *host, int irq,
5687 irq_handler_t irq_handler, unsigned long irq_flags,
5688 struct scsi_host_template *sht)
5689{
cbcdd875 5690 int i, rc;
f5cda257
TH
5691
5692 rc = ata_host_start(host);
5693 if (rc)
5694 return rc;
5695
3d46b2e2
PM
5696 /* Special case for polling mode */
5697 if (!irq) {
5698 WARN_ON(irq_handler);
5699 return ata_host_register(host, sht);
5700 }
5701
f5cda257
TH
5702 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
5703 dev_driver_string(host->dev), host);
5704 if (rc)
5705 return rc;
5706
cbcdd875
TH
5707 for (i = 0; i < host->n_ports; i++)
5708 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 5709
f5cda257
TH
5710 rc = ata_host_register(host, sht);
5711 /* if failed, just free the IRQ and leave ports alone */
5712 if (rc)
5713 devm_free_irq(host->dev, irq, host);
5714
5715 return rc;
5716}
5717
720ba126
TH
5718/**
5719 * ata_port_detach - Detach ATA port in prepration of device removal
5720 * @ap: ATA port to be detached
5721 *
5722 * Detach all ATA devices and the associated SCSI devices of @ap;
5723 * then, remove the associated SCSI host. @ap is guaranteed to
5724 * be quiescent on return from this function.
5725 *
5726 * LOCKING:
5727 * Kernel thread context (may sleep).
5728 */
741b7763 5729static void ata_port_detach(struct ata_port *ap)
720ba126
TH
5730{
5731 unsigned long flags;
41bda9c9 5732 struct ata_link *link;
f58229f8 5733 struct ata_device *dev;
720ba126
TH
5734
5735 if (!ap->ops->error_handler)
c3cf30a9 5736 goto skip_eh;
720ba126
TH
5737
5738 /* tell EH we're leaving & flush EH */
ba6a1308 5739 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 5740 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 5741 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5742
5743 ata_port_wait_eh(ap);
5744
7f9ad9b8
TH
5745 /* EH is now guaranteed to see UNLOADING - EH context belongs
5746 * to us. Disable all existing devices.
720ba126 5747 */
41bda9c9
TH
5748 ata_port_for_each_link(link, ap) {
5749 ata_link_for_each_dev(dev, link)
5750 ata_dev_disable(dev);
5751 }
720ba126 5752
720ba126
TH
5753 /* Final freeze & EH. All in-flight commands are aborted. EH
5754 * will be skipped and retrials will be terminated with bad
5755 * target.
5756 */
ba6a1308 5757 spin_lock_irqsave(ap->lock, flags);
720ba126 5758 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 5759 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
5760
5761 ata_port_wait_eh(ap);
45a66c1c 5762 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 5763
c3cf30a9 5764 skip_eh:
720ba126 5765 /* remove the associated SCSI host */
cca3974e 5766 scsi_remove_host(ap->scsi_host);
720ba126
TH
5767}
5768
0529c159
TH
5769/**
5770 * ata_host_detach - Detach all ports of an ATA host
5771 * @host: Host to detach
5772 *
5773 * Detach all ports of @host.
5774 *
5775 * LOCKING:
5776 * Kernel thread context (may sleep).
5777 */
5778void ata_host_detach(struct ata_host *host)
5779{
5780 int i;
5781
5782 for (i = 0; i < host->n_ports; i++)
5783 ata_port_detach(host->ports[i]);
562f0c2d
TH
5784
5785 /* the host is dead now, dissociate ACPI */
5786 ata_acpi_dissociate(host);
0529c159
TH
5787}
5788
374b1873
JG
5789#ifdef CONFIG_PCI
5790
1da177e4
LT
5791/**
5792 * ata_pci_remove_one - PCI layer callback for device removal
5793 * @pdev: PCI device that was removed
5794 *
b878ca5d
TH
5795 * PCI layer indicates to libata via this hook that hot-unplug or
5796 * module unload event has occurred. Detach all ports. Resource
5797 * release is handled via devres.
1da177e4
LT
5798 *
5799 * LOCKING:
5800 * Inherited from PCI layer (may sleep).
5801 */
f0d36efd 5802void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 5803{
2855568b 5804 struct device *dev = &pdev->dev;
cca3974e 5805 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 5806
b878ca5d 5807 ata_host_detach(host);
1da177e4
LT
5808}
5809
5810/* move to PCI subsystem */
057ace5e 5811int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5812{
5813 unsigned long tmp = 0;
5814
5815 switch (bits->width) {
5816 case 1: {
5817 u8 tmp8 = 0;
5818 pci_read_config_byte(pdev, bits->reg, &tmp8);
5819 tmp = tmp8;
5820 break;
5821 }
5822 case 2: {
5823 u16 tmp16 = 0;
5824 pci_read_config_word(pdev, bits->reg, &tmp16);
5825 tmp = tmp16;
5826 break;
5827 }
5828 case 4: {
5829 u32 tmp32 = 0;
5830 pci_read_config_dword(pdev, bits->reg, &tmp32);
5831 tmp = tmp32;
5832 break;
5833 }
5834
5835 default:
5836 return -EINVAL;
5837 }
5838
5839 tmp &= bits->mask;
5840
5841 return (tmp == bits->val) ? 1 : 0;
5842}
9b847548 5843
6ffa01d8 5844#ifdef CONFIG_PM
3c5100c1 5845void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
5846{
5847 pci_save_state(pdev);
4c90d971 5848 pci_disable_device(pdev);
500530f6 5849
3a2d5b70 5850 if (mesg.event & PM_EVENT_SLEEP)
500530f6 5851 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
5852}
5853
553c4aa6 5854int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 5855{
553c4aa6
TH
5856 int rc;
5857
9b847548
JA
5858 pci_set_power_state(pdev, PCI_D0);
5859 pci_restore_state(pdev);
553c4aa6 5860
b878ca5d 5861 rc = pcim_enable_device(pdev);
553c4aa6
TH
5862 if (rc) {
5863 dev_printk(KERN_ERR, &pdev->dev,
5864 "failed to enable device after resume (%d)\n", rc);
5865 return rc;
5866 }
5867
9b847548 5868 pci_set_master(pdev);
553c4aa6 5869 return 0;
500530f6
TH
5870}
5871
3c5100c1 5872int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 5873{
cca3974e 5874 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
5875 int rc = 0;
5876
cca3974e 5877 rc = ata_host_suspend(host, mesg);
500530f6
TH
5878 if (rc)
5879 return rc;
5880
3c5100c1 5881 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
5882
5883 return 0;
5884}
5885
5886int ata_pci_device_resume(struct pci_dev *pdev)
5887{
cca3974e 5888 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 5889 int rc;
500530f6 5890
553c4aa6
TH
5891 rc = ata_pci_device_do_resume(pdev);
5892 if (rc == 0)
5893 ata_host_resume(host);
5894 return rc;
9b847548 5895}
6ffa01d8
TH
5896#endif /* CONFIG_PM */
5897
1da177e4
LT
5898#endif /* CONFIG_PCI */
5899
33267325
TH
5900static int __init ata_parse_force_one(char **cur,
5901 struct ata_force_ent *force_ent,
5902 const char **reason)
5903{
5904 /* FIXME: Currently, there's no way to tag init const data and
5905 * using __initdata causes build failure on some versions of
5906 * gcc. Once __initdataconst is implemented, add const to the
5907 * following structure.
5908 */
5909 static struct ata_force_param force_tbl[] __initdata = {
5910 { "40c", .cbl = ATA_CBL_PATA40 },
5911 { "80c", .cbl = ATA_CBL_PATA80 },
5912 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
5913 { "unk", .cbl = ATA_CBL_PATA_UNK },
5914 { "ign", .cbl = ATA_CBL_PATA_IGN },
5915 { "sata", .cbl = ATA_CBL_SATA },
5916 { "1.5Gbps", .spd_limit = 1 },
5917 { "3.0Gbps", .spd_limit = 2 },
5918 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
5919 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
5920 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
5921 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
5922 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
5923 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
5924 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
5925 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
5926 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
5927 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
5928 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
5929 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
5930 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
5931 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
5932 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5933 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5934 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
5935 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5936 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5937 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
5938 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5939 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5940 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
5941 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5942 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5943 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
5944 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5945 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5946 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
5947 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5948 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5949 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
5950 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5951 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5952 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
5953 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
5954 };
5955 char *start = *cur, *p = *cur;
5956 char *id, *val, *endp;
5957 const struct ata_force_param *match_fp = NULL;
5958 int nr_matches = 0, i;
5959
5960 /* find where this param ends and update *cur */
5961 while (*p != '\0' && *p != ',')
5962 p++;
5963
5964 if (*p == '\0')
5965 *cur = p;
5966 else
5967 *cur = p + 1;
5968
5969 *p = '\0';
5970
5971 /* parse */
5972 p = strchr(start, ':');
5973 if (!p) {
5974 val = strstrip(start);
5975 goto parse_val;
5976 }
5977 *p = '\0';
5978
5979 id = strstrip(start);
5980 val = strstrip(p + 1);
5981
5982 /* parse id */
5983 p = strchr(id, '.');
5984 if (p) {
5985 *p++ = '\0';
5986 force_ent->device = simple_strtoul(p, &endp, 10);
5987 if (p == endp || *endp != '\0') {
5988 *reason = "invalid device";
5989 return -EINVAL;
5990 }
5991 }
5992
5993 force_ent->port = simple_strtoul(id, &endp, 10);
5994 if (p == endp || *endp != '\0') {
5995 *reason = "invalid port/link";
5996 return -EINVAL;
5997 }
5998
5999 parse_val:
6000 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6001 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6002 const struct ata_force_param *fp = &force_tbl[i];
6003
6004 if (strncasecmp(val, fp->name, strlen(val)))
6005 continue;
6006
6007 nr_matches++;
6008 match_fp = fp;
6009
6010 if (strcasecmp(val, fp->name) == 0) {
6011 nr_matches = 1;
6012 break;
6013 }
6014 }
6015
6016 if (!nr_matches) {
6017 *reason = "unknown value";
6018 return -EINVAL;
6019 }
6020 if (nr_matches > 1) {
6021 *reason = "ambigious value";
6022 return -EINVAL;
6023 }
6024
6025 force_ent->param = *match_fp;
6026
6027 return 0;
6028}
6029
6030static void __init ata_parse_force_param(void)
6031{
6032 int idx = 0, size = 1;
6033 int last_port = -1, last_device = -1;
6034 char *p, *cur, *next;
6035
6036 /* calculate maximum number of params and allocate force_tbl */
6037 for (p = ata_force_param_buf; *p; p++)
6038 if (*p == ',')
6039 size++;
6040
6041 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6042 if (!ata_force_tbl) {
6043 printk(KERN_WARNING "ata: failed to extend force table, "
6044 "libata.force ignored\n");
6045 return;
6046 }
6047
6048 /* parse and populate the table */
6049 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6050 const char *reason = "";
6051 struct ata_force_ent te = { .port = -1, .device = -1 };
6052
6053 next = cur;
6054 if (ata_parse_force_one(&next, &te, &reason)) {
6055 printk(KERN_WARNING "ata: failed to parse force "
6056 "parameter \"%s\" (%s)\n",
6057 cur, reason);
6058 continue;
6059 }
6060
6061 if (te.port == -1) {
6062 te.port = last_port;
6063 te.device = last_device;
6064 }
6065
6066 ata_force_tbl[idx++] = te;
6067
6068 last_port = te.port;
6069 last_device = te.device;
6070 }
6071
6072 ata_force_tbl_size = idx;
6073}
1da177e4 6074
1da177e4
LT
6075static int __init ata_init(void)
6076{
33267325
TH
6077 ata_parse_force_param();
6078
1da177e4
LT
6079 ata_wq = create_workqueue("ata");
6080 if (!ata_wq)
6081 return -ENOMEM;
6082
453b07ac
TH
6083 ata_aux_wq = create_singlethread_workqueue("ata_aux");
6084 if (!ata_aux_wq) {
6085 destroy_workqueue(ata_wq);
6086 return -ENOMEM;
6087 }
6088
1da177e4
LT
6089 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6090 return 0;
6091}
6092
6093static void __exit ata_exit(void)
6094{
33267325 6095 kfree(ata_force_tbl);
1da177e4 6096 destroy_workqueue(ata_wq);
453b07ac 6097 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6098}
6099
a4625085 6100subsys_initcall(ata_init);
1da177e4
LT
6101module_exit(ata_exit);
6102
67846b30 6103static unsigned long ratelimit_time;
34af946a 6104static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6105
6106int ata_ratelimit(void)
6107{
6108 int rc;
6109 unsigned long flags;
6110
6111 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6112
6113 if (time_after(jiffies, ratelimit_time)) {
6114 rc = 1;
6115 ratelimit_time = jiffies + (HZ/5);
6116 } else
6117 rc = 0;
6118
6119 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6120
6121 return rc;
6122}
6123
c22daff4
TH
6124/**
6125 * ata_wait_register - wait until register value changes
6126 * @reg: IO-mapped register
6127 * @mask: Mask to apply to read register value
6128 * @val: Wait condition
341c2c95
TH
6129 * @interval: polling interval in milliseconds
6130 * @timeout: timeout in milliseconds
c22daff4
TH
6131 *
6132 * Waiting for some bits of register to change is a common
6133 * operation for ATA controllers. This function reads 32bit LE
6134 * IO-mapped register @reg and tests for the following condition.
6135 *
6136 * (*@reg & mask) != val
6137 *
6138 * If the condition is met, it returns; otherwise, the process is
6139 * repeated after @interval_msec until timeout.
6140 *
6141 * LOCKING:
6142 * Kernel thread context (may sleep)
6143 *
6144 * RETURNS:
6145 * The final register value.
6146 */
6147u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
341c2c95 6148 unsigned long interval, unsigned long timeout)
c22daff4 6149{
341c2c95 6150 unsigned long deadline;
c22daff4
TH
6151 u32 tmp;
6152
6153 tmp = ioread32(reg);
6154
6155 /* Calculate timeout _after_ the first read to make sure
6156 * preceding writes reach the controller before starting to
6157 * eat away the timeout.
6158 */
341c2c95 6159 deadline = ata_deadline(jiffies, timeout);
c22daff4 6160
341c2c95
TH
6161 while ((tmp & mask) == val && time_before(jiffies, deadline)) {
6162 msleep(interval);
c22daff4
TH
6163 tmp = ioread32(reg);
6164 }
6165
6166 return tmp;
6167}
6168
dd5b06c4
TH
6169/*
6170 * Dummy port_ops
6171 */
182d7bba 6172static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
dd5b06c4 6173{
182d7bba 6174 return AC_ERR_SYSTEM;
dd5b06c4
TH
6175}
6176
182d7bba 6177static void ata_dummy_error_handler(struct ata_port *ap)
dd5b06c4 6178{
182d7bba 6179 /* truly dummy */
dd5b06c4
TH
6180}
6181
029cfd6b 6182struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
6183 .qc_prep = ata_noop_qc_prep,
6184 .qc_issue = ata_dummy_qc_issue,
182d7bba 6185 .error_handler = ata_dummy_error_handler,
dd5b06c4
TH
6186};
6187
21b0ad4f
TH
6188const struct ata_port_info ata_dummy_port_info = {
6189 .port_ops = &ata_dummy_port_ops,
6190};
6191
1da177e4
LT
6192/*
6193 * libata is essentially a library of internal helper functions for
6194 * low-level ATA host controller drivers. As such, the API/ABI is
6195 * likely to change as new drivers are added and updated.
6196 * Do not depend on ABI/API stability.
6197 */
e9c83914
TH
6198EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6199EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6200EXPORT_SYMBOL_GPL(sata_deb_timing_long);
029cfd6b
TH
6201EXPORT_SYMBOL_GPL(ata_base_port_ops);
6202EXPORT_SYMBOL_GPL(sata_port_ops);
dd5b06c4 6203EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6204EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4 6205EXPORT_SYMBOL_GPL(ata_std_bios_param);
cca3974e 6206EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6207EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6208EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 6209EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6210EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6211EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6212EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 6213EXPORT_SYMBOL_GPL(ata_sg_init);
f686bcb8 6214EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6215EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
436d34b3 6216EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
6217EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6218EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
6219EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6220EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6221EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6222EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6223EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6224EXPORT_SYMBOL_GPL(ata_mode_string);
6225EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4 6226EXPORT_SYMBOL_GPL(ata_port_start);
04351821 6227EXPORT_SYMBOL_GPL(ata_do_set_mode);
31cc23b3 6228EXPORT_SYMBOL_GPL(ata_std_qc_defer);
e46834cd 6229EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4 6230EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6231EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6232EXPORT_SYMBOL_GPL(sata_set_spd);
aa2731ad 6233EXPORT_SYMBOL_GPL(ata_wait_after_reset);
936fd732
TH
6234EXPORT_SYMBOL_GPL(sata_link_debounce);
6235EXPORT_SYMBOL_GPL(sata_link_resume);
0aa1113d 6236EXPORT_SYMBOL_GPL(ata_std_prereset);
cc0680a5 6237EXPORT_SYMBOL_GPL(sata_link_hardreset);
57c9efdf 6238EXPORT_SYMBOL_GPL(sata_std_hardreset);
203c75b8 6239EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6240EXPORT_SYMBOL_GPL(ata_dev_classify);
6241EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6242EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6243EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6244EXPORT_SYMBOL_GPL(ata_wait_register);
1da177e4
LT
6245EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6246EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6247EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6248EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6249EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
34bf2170
TH
6250EXPORT_SYMBOL_GPL(sata_scr_valid);
6251EXPORT_SYMBOL_GPL(sata_scr_read);
6252EXPORT_SYMBOL_GPL(sata_scr_write);
6253EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
6254EXPORT_SYMBOL_GPL(ata_link_online);
6255EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 6256#ifdef CONFIG_PM
cca3974e
JG
6257EXPORT_SYMBOL_GPL(ata_host_suspend);
6258EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6259#endif /* CONFIG_PM */
6a62a04d
TH
6260EXPORT_SYMBOL_GPL(ata_id_string);
6261EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
6262EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6263
1bc4ccff 6264EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 6265EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
6266EXPORT_SYMBOL_GPL(ata_timing_compute);
6267EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 6268EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 6269
1da177e4
LT
6270#ifdef CONFIG_PCI
6271EXPORT_SYMBOL_GPL(pci_test_config_bits);
1da177e4 6272EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6273#ifdef CONFIG_PM
500530f6
TH
6274EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6275EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6276EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6277EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6278#endif /* CONFIG_PM */
1da177e4 6279#endif /* CONFIG_PCI */
9b847548 6280
b64bbc39
TH
6281EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6282EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6283EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
6284EXPORT_SYMBOL_GPL(ata_port_desc);
6285#ifdef CONFIG_PCI
6286EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6287#endif /* CONFIG_PCI */
7b70fc03 6288EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 6289EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 6290EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 6291EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 6292EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
6293EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6294EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6295EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6296EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
10acf3b0 6297EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
022bdb07 6298EXPORT_SYMBOL_GPL(ata_do_eh);
a1efdaba 6299EXPORT_SYMBOL_GPL(ata_std_error_handler);
be0d18df
AC
6300
6301EXPORT_SYMBOL_GPL(ata_cable_40wire);
6302EXPORT_SYMBOL_GPL(ata_cable_80wire);
6303EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 6304EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 6305EXPORT_SYMBOL_GPL(ata_cable_sata);