]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
libata: misc updates to prepare for slave link
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
1da177e4
LT
49#include <linux/spinlock.h>
50#include <linux/blkdev.h>
51#include <linux/delay.h>
52#include <linux/timer.h>
53#include <linux/interrupt.h>
54#include <linux/completion.h>
55#include <linux/suspend.h>
56#include <linux/workqueue.h>
378f058c 57#include <linux/scatterlist.h>
2dcb407e 58#include <linux/io.h>
1da177e4 59#include <scsi/scsi.h>
193515d5 60#include <scsi/scsi_cmnd.h>
1da177e4
LT
61#include <scsi/scsi_host.h>
62#include <linux/libata.h>
1da177e4 63#include <asm/byteorder.h>
140b5e59 64#include <linux/cdrom.h>
1da177e4
LT
65
66#include "libata.h"
67
fda0efc5 68
d7bb4cc7 69/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
70const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
71const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
72const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 73
029cfd6b 74const struct ata_port_operations ata_base_port_ops = {
0aa1113d 75 .prereset = ata_std_prereset,
203c75b8 76 .postreset = ata_std_postreset,
a1efdaba 77 .error_handler = ata_std_error_handler,
029cfd6b
TH
78};
79
80const struct ata_port_operations sata_port_ops = {
81 .inherits = &ata_base_port_ops,
82
83 .qc_defer = ata_std_qc_defer,
57c9efdf 84 .hardreset = sata_std_hardreset,
029cfd6b
TH
85};
86
3373efd8
TH
87static unsigned int ata_dev_init_params(struct ata_device *dev,
88 u16 heads, u16 sectors);
89static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
90static unsigned int ata_dev_set_feature(struct ata_device *dev,
91 u8 enable, u8 feature);
3373efd8 92static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 93static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 94
f3187195 95unsigned int ata_print_id = 1;
1da177e4
LT
96static struct workqueue_struct *ata_wq;
97
453b07ac
TH
98struct workqueue_struct *ata_aux_wq;
99
33267325
TH
100struct ata_force_param {
101 const char *name;
102 unsigned int cbl;
103 int spd_limit;
104 unsigned long xfer_mask;
105 unsigned int horkage_on;
106 unsigned int horkage_off;
05944bdf 107 unsigned int lflags;
33267325
TH
108};
109
110struct ata_force_ent {
111 int port;
112 int device;
113 struct ata_force_param param;
114};
115
116static struct ata_force_ent *ata_force_tbl;
117static int ata_force_tbl_size;
118
119static char ata_force_param_buf[PAGE_SIZE] __initdata;
7afb4222
TH
120/* param_buf is thrown away after initialization, disallow read */
121module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
33267325
TH
122MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
123
2486fa56 124static int atapi_enabled = 1;
1623c81e
JG
125module_param(atapi_enabled, int, 0444);
126MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
127
c5c61bda 128static int atapi_dmadir = 0;
95de719a
AL
129module_param(atapi_dmadir, int, 0444);
130MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
131
baf4fdfa
ML
132int atapi_passthru16 = 1;
133module_param(atapi_passthru16, int, 0444);
134MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
135
c3c013a2
JG
136int libata_fua = 0;
137module_param_named(fua, libata_fua, int, 0444);
138MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
139
2dcb407e 140static int ata_ignore_hpa;
1e999736
AC
141module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
142MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
143
b3a70601
AC
144static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
145module_param_named(dma, libata_dma_mask, int, 0444);
146MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
147
87fbc5a0 148static int ata_probe_timeout;
a8601e5f
AM
149module_param(ata_probe_timeout, int, 0444);
150MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
151
6ebe9d86 152int libata_noacpi = 0;
d7d0dad6 153module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 154MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 155
ae8d4ee7
AC
156int libata_allow_tpm = 0;
157module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
158MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
159
1da177e4
LT
160MODULE_AUTHOR("Jeff Garzik");
161MODULE_DESCRIPTION("Library module for ATA devices");
162MODULE_LICENSE("GPL");
163MODULE_VERSION(DRV_VERSION);
164
0baab86b 165
aadffb68
TH
166/*
167 * Iterator helpers. Don't use directly.
168 *
169 * LOCKING:
170 * Host lock or EH context.
171 */
172struct ata_link *__ata_port_next_link(struct ata_port *ap,
173 struct ata_link *link, bool dev_only)
174{
175 /* NULL link indicates start of iteration */
176 if (!link) {
177 if (dev_only && sata_pmp_attached(ap))
178 return ap->pmp_link;
179 return &ap->link;
180 }
181
182 /* we just iterated over the host link, what's next? */
183 if (ata_is_host_link(link)) {
184 if (!sata_pmp_attached(ap))
185 return NULL;
186 return ap->pmp_link;
187 }
188
189 /* iterate to the next PMP link */
190 if (++link < ap->pmp_link + ap->nr_pmp_links)
191 return link;
192 return NULL;
193}
194
33267325
TH
195/**
196 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 197 * @ap: ATA port of interest
33267325
TH
198 *
199 * Force cable type according to libata.force and whine about it.
200 * The last entry which has matching port number is used, so it
201 * can be specified as part of device force parameters. For
202 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
203 * same effect.
204 *
205 * LOCKING:
206 * EH context.
207 */
208void ata_force_cbl(struct ata_port *ap)
209{
210 int i;
211
212 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
213 const struct ata_force_ent *fe = &ata_force_tbl[i];
214
215 if (fe->port != -1 && fe->port != ap->print_id)
216 continue;
217
218 if (fe->param.cbl == ATA_CBL_NONE)
219 continue;
220
221 ap->cbl = fe->param.cbl;
222 ata_port_printk(ap, KERN_NOTICE,
223 "FORCE: cable set to %s\n", fe->param.name);
224 return;
225 }
226}
227
228/**
05944bdf 229 * ata_force_link_limits - force link limits according to libata.force
33267325
TH
230 * @link: ATA link of interest
231 *
05944bdf
TH
232 * Force link flags and SATA spd limit according to libata.force
233 * and whine about it. When only the port part is specified
234 * (e.g. 1:), the limit applies to all links connected to both
235 * the host link and all fan-out ports connected via PMP. If the
236 * device part is specified as 0 (e.g. 1.00:), it specifies the
237 * first fan-out link not the host link. Device number 15 always
238 * points to the host link whether PMP is attached or not.
33267325
TH
239 *
240 * LOCKING:
241 * EH context.
242 */
05944bdf 243static void ata_force_link_limits(struct ata_link *link)
33267325 244{
05944bdf 245 bool did_spd = false;
33267325
TH
246 int linkno, i;
247
248 if (ata_is_host_link(link))
249 linkno = 15;
250 else
251 linkno = link->pmp;
252
253 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
254 const struct ata_force_ent *fe = &ata_force_tbl[i];
255
256 if (fe->port != -1 && fe->port != link->ap->print_id)
257 continue;
258
259 if (fe->device != -1 && fe->device != linkno)
260 continue;
261
05944bdf
TH
262 /* only honor the first spd limit */
263 if (!did_spd && fe->param.spd_limit) {
264 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
265 ata_link_printk(link, KERN_NOTICE,
266 "FORCE: PHY spd limit set to %s\n",
267 fe->param.name);
268 did_spd = true;
269 }
33267325 270
05944bdf
TH
271 /* let lflags stack */
272 if (fe->param.lflags) {
273 link->flags |= fe->param.lflags;
274 ata_link_printk(link, KERN_NOTICE,
275 "FORCE: link flag 0x%x forced -> 0x%x\n",
276 fe->param.lflags, link->flags);
277 }
33267325
TH
278 }
279}
280
281/**
282 * ata_force_xfermask - force xfermask according to libata.force
283 * @dev: ATA device of interest
284 *
285 * Force xfer_mask according to libata.force and whine about it.
286 * For consistency with link selection, device number 15 selects
287 * the first device connected to the host link.
288 *
289 * LOCKING:
290 * EH context.
291 */
292static void ata_force_xfermask(struct ata_device *dev)
293{
294 int devno = dev->link->pmp + dev->devno;
295 int alt_devno = devno;
296 int i;
297
298 /* allow n.15 for the first device attached to host port */
299 if (ata_is_host_link(dev->link) && devno == 0)
300 alt_devno = 15;
301
302 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
303 const struct ata_force_ent *fe = &ata_force_tbl[i];
304 unsigned long pio_mask, mwdma_mask, udma_mask;
305
306 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
307 continue;
308
309 if (fe->device != -1 && fe->device != devno &&
310 fe->device != alt_devno)
311 continue;
312
313 if (!fe->param.xfer_mask)
314 continue;
315
316 ata_unpack_xfermask(fe->param.xfer_mask,
317 &pio_mask, &mwdma_mask, &udma_mask);
318 if (udma_mask)
319 dev->udma_mask = udma_mask;
320 else if (mwdma_mask) {
321 dev->udma_mask = 0;
322 dev->mwdma_mask = mwdma_mask;
323 } else {
324 dev->udma_mask = 0;
325 dev->mwdma_mask = 0;
326 dev->pio_mask = pio_mask;
327 }
328
329 ata_dev_printk(dev, KERN_NOTICE,
330 "FORCE: xfer_mask set to %s\n", fe->param.name);
331 return;
332 }
333}
334
335/**
336 * ata_force_horkage - force horkage according to libata.force
337 * @dev: ATA device of interest
338 *
339 * Force horkage according to libata.force and whine about it.
340 * For consistency with link selection, device number 15 selects
341 * the first device connected to the host link.
342 *
343 * LOCKING:
344 * EH context.
345 */
346static void ata_force_horkage(struct ata_device *dev)
347{
348 int devno = dev->link->pmp + dev->devno;
349 int alt_devno = devno;
350 int i;
351
352 /* allow n.15 for the first device attached to host port */
353 if (ata_is_host_link(dev->link) && devno == 0)
354 alt_devno = 15;
355
356 for (i = 0; i < ata_force_tbl_size; i++) {
357 const struct ata_force_ent *fe = &ata_force_tbl[i];
358
359 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
360 continue;
361
362 if (fe->device != -1 && fe->device != devno &&
363 fe->device != alt_devno)
364 continue;
365
366 if (!(~dev->horkage & fe->param.horkage_on) &&
367 !(dev->horkage & fe->param.horkage_off))
368 continue;
369
370 dev->horkage |= fe->param.horkage_on;
371 dev->horkage &= ~fe->param.horkage_off;
372
373 ata_dev_printk(dev, KERN_NOTICE,
374 "FORCE: horkage modified (%s)\n", fe->param.name);
375 }
376}
377
436d34b3
TH
378/**
379 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
380 * @opcode: SCSI opcode
381 *
382 * Determine ATAPI command type from @opcode.
383 *
384 * LOCKING:
385 * None.
386 *
387 * RETURNS:
388 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
389 */
390int atapi_cmd_type(u8 opcode)
391{
392 switch (opcode) {
393 case GPCMD_READ_10:
394 case GPCMD_READ_12:
395 return ATAPI_READ;
396
397 case GPCMD_WRITE_10:
398 case GPCMD_WRITE_12:
399 case GPCMD_WRITE_AND_VERIFY_10:
400 return ATAPI_WRITE;
401
402 case GPCMD_READ_CD:
403 case GPCMD_READ_CD_MSF:
404 return ATAPI_READ_CD;
405
e52dcc48
TH
406 case ATA_16:
407 case ATA_12:
408 if (atapi_passthru16)
409 return ATAPI_PASS_THRU;
410 /* fall thru */
436d34b3
TH
411 default:
412 return ATAPI_MISC;
413 }
414}
415
1da177e4
LT
416/**
417 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
418 * @tf: Taskfile to convert
1da177e4 419 * @pmp: Port multiplier port
9977126c
TH
420 * @is_cmd: This FIS is for command
421 * @fis: Buffer into which data will output
1da177e4
LT
422 *
423 * Converts a standard ATA taskfile to a Serial ATA
424 * FIS structure (Register - Host to Device).
425 *
426 * LOCKING:
427 * Inherited from caller.
428 */
9977126c 429void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 430{
9977126c
TH
431 fis[0] = 0x27; /* Register - Host to Device FIS */
432 fis[1] = pmp & 0xf; /* Port multiplier number*/
433 if (is_cmd)
434 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
435
1da177e4
LT
436 fis[2] = tf->command;
437 fis[3] = tf->feature;
438
439 fis[4] = tf->lbal;
440 fis[5] = tf->lbam;
441 fis[6] = tf->lbah;
442 fis[7] = tf->device;
443
444 fis[8] = tf->hob_lbal;
445 fis[9] = tf->hob_lbam;
446 fis[10] = tf->hob_lbah;
447 fis[11] = tf->hob_feature;
448
449 fis[12] = tf->nsect;
450 fis[13] = tf->hob_nsect;
451 fis[14] = 0;
452 fis[15] = tf->ctl;
453
454 fis[16] = 0;
455 fis[17] = 0;
456 fis[18] = 0;
457 fis[19] = 0;
458}
459
460/**
461 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
462 * @fis: Buffer from which data will be input
463 * @tf: Taskfile to output
464 *
e12a1be6 465 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
466 *
467 * LOCKING:
468 * Inherited from caller.
469 */
470
057ace5e 471void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
472{
473 tf->command = fis[2]; /* status */
474 tf->feature = fis[3]; /* error */
475
476 tf->lbal = fis[4];
477 tf->lbam = fis[5];
478 tf->lbah = fis[6];
479 tf->device = fis[7];
480
481 tf->hob_lbal = fis[8];
482 tf->hob_lbam = fis[9];
483 tf->hob_lbah = fis[10];
484
485 tf->nsect = fis[12];
486 tf->hob_nsect = fis[13];
487}
488
8cbd6df1
AL
489static const u8 ata_rw_cmds[] = {
490 /* pio multi */
491 ATA_CMD_READ_MULTI,
492 ATA_CMD_WRITE_MULTI,
493 ATA_CMD_READ_MULTI_EXT,
494 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
495 0,
496 0,
497 0,
498 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
499 /* pio */
500 ATA_CMD_PIO_READ,
501 ATA_CMD_PIO_WRITE,
502 ATA_CMD_PIO_READ_EXT,
503 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
504 0,
505 0,
506 0,
507 0,
8cbd6df1
AL
508 /* dma */
509 ATA_CMD_READ,
510 ATA_CMD_WRITE,
511 ATA_CMD_READ_EXT,
9a3dccc4
TH
512 ATA_CMD_WRITE_EXT,
513 0,
514 0,
515 0,
516 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 517};
1da177e4
LT
518
519/**
8cbd6df1 520 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
521 * @tf: command to examine and configure
522 * @dev: device tf belongs to
1da177e4 523 *
2e9edbf8 524 * Examine the device configuration and tf->flags to calculate
8cbd6df1 525 * the proper read/write commands and protocol to use.
1da177e4
LT
526 *
527 * LOCKING:
528 * caller.
529 */
bd056d7e 530static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 531{
9a3dccc4 532 u8 cmd;
1da177e4 533
9a3dccc4 534 int index, fua, lba48, write;
2e9edbf8 535
9a3dccc4 536 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
537 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
538 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 539
8cbd6df1
AL
540 if (dev->flags & ATA_DFLAG_PIO) {
541 tf->protocol = ATA_PROT_PIO;
9a3dccc4 542 index = dev->multi_count ? 0 : 8;
9af5c9c9 543 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
544 /* Unable to use DMA due to host limitation */
545 tf->protocol = ATA_PROT_PIO;
0565c26d 546 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
547 } else {
548 tf->protocol = ATA_PROT_DMA;
9a3dccc4 549 index = 16;
8cbd6df1 550 }
1da177e4 551
9a3dccc4
TH
552 cmd = ata_rw_cmds[index + fua + lba48 + write];
553 if (cmd) {
554 tf->command = cmd;
555 return 0;
556 }
557 return -1;
1da177e4
LT
558}
559
35b649fe
TH
560/**
561 * ata_tf_read_block - Read block address from ATA taskfile
562 * @tf: ATA taskfile of interest
563 * @dev: ATA device @tf belongs to
564 *
565 * LOCKING:
566 * None.
567 *
568 * Read block address from @tf. This function can handle all
569 * three address formats - LBA, LBA48 and CHS. tf->protocol and
570 * flags select the address format to use.
571 *
572 * RETURNS:
573 * Block address read from @tf.
574 */
575u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
576{
577 u64 block = 0;
578
579 if (tf->flags & ATA_TFLAG_LBA) {
580 if (tf->flags & ATA_TFLAG_LBA48) {
581 block |= (u64)tf->hob_lbah << 40;
582 block |= (u64)tf->hob_lbam << 32;
583 block |= tf->hob_lbal << 24;
584 } else
585 block |= (tf->device & 0xf) << 24;
586
587 block |= tf->lbah << 16;
588 block |= tf->lbam << 8;
589 block |= tf->lbal;
590 } else {
591 u32 cyl, head, sect;
592
593 cyl = tf->lbam | (tf->lbah << 8);
594 head = tf->device & 0xf;
595 sect = tf->lbal;
596
597 block = (cyl * dev->heads + head) * dev->sectors + sect;
598 }
599
600 return block;
601}
602
bd056d7e
TH
603/**
604 * ata_build_rw_tf - Build ATA taskfile for given read/write request
605 * @tf: Target ATA taskfile
606 * @dev: ATA device @tf belongs to
607 * @block: Block address
608 * @n_block: Number of blocks
609 * @tf_flags: RW/FUA etc...
610 * @tag: tag
611 *
612 * LOCKING:
613 * None.
614 *
615 * Build ATA taskfile @tf for read/write request described by
616 * @block, @n_block, @tf_flags and @tag on @dev.
617 *
618 * RETURNS:
619 *
620 * 0 on success, -ERANGE if the request is too large for @dev,
621 * -EINVAL if the request is invalid.
622 */
623int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
624 u64 block, u32 n_block, unsigned int tf_flags,
625 unsigned int tag)
626{
627 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
628 tf->flags |= tf_flags;
629
6d1245bf 630 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
631 /* yay, NCQ */
632 if (!lba_48_ok(block, n_block))
633 return -ERANGE;
634
635 tf->protocol = ATA_PROT_NCQ;
636 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
637
638 if (tf->flags & ATA_TFLAG_WRITE)
639 tf->command = ATA_CMD_FPDMA_WRITE;
640 else
641 tf->command = ATA_CMD_FPDMA_READ;
642
643 tf->nsect = tag << 3;
644 tf->hob_feature = (n_block >> 8) & 0xff;
645 tf->feature = n_block & 0xff;
646
647 tf->hob_lbah = (block >> 40) & 0xff;
648 tf->hob_lbam = (block >> 32) & 0xff;
649 tf->hob_lbal = (block >> 24) & 0xff;
650 tf->lbah = (block >> 16) & 0xff;
651 tf->lbam = (block >> 8) & 0xff;
652 tf->lbal = block & 0xff;
653
654 tf->device = 1 << 6;
655 if (tf->flags & ATA_TFLAG_FUA)
656 tf->device |= 1 << 7;
657 } else if (dev->flags & ATA_DFLAG_LBA) {
658 tf->flags |= ATA_TFLAG_LBA;
659
660 if (lba_28_ok(block, n_block)) {
661 /* use LBA28 */
662 tf->device |= (block >> 24) & 0xf;
663 } else if (lba_48_ok(block, n_block)) {
664 if (!(dev->flags & ATA_DFLAG_LBA48))
665 return -ERANGE;
666
667 /* use LBA48 */
668 tf->flags |= ATA_TFLAG_LBA48;
669
670 tf->hob_nsect = (n_block >> 8) & 0xff;
671
672 tf->hob_lbah = (block >> 40) & 0xff;
673 tf->hob_lbam = (block >> 32) & 0xff;
674 tf->hob_lbal = (block >> 24) & 0xff;
675 } else
676 /* request too large even for LBA48 */
677 return -ERANGE;
678
679 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
680 return -EINVAL;
681
682 tf->nsect = n_block & 0xff;
683
684 tf->lbah = (block >> 16) & 0xff;
685 tf->lbam = (block >> 8) & 0xff;
686 tf->lbal = block & 0xff;
687
688 tf->device |= ATA_LBA;
689 } else {
690 /* CHS */
691 u32 sect, head, cyl, track;
692
693 /* The request -may- be too large for CHS addressing. */
694 if (!lba_28_ok(block, n_block))
695 return -ERANGE;
696
697 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
698 return -EINVAL;
699
700 /* Convert LBA to CHS */
701 track = (u32)block / dev->sectors;
702 cyl = track / dev->heads;
703 head = track % dev->heads;
704 sect = (u32)block % dev->sectors + 1;
705
706 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
707 (u32)block, track, cyl, head, sect);
708
709 /* Check whether the converted CHS can fit.
710 Cylinder: 0-65535
711 Head: 0-15
712 Sector: 1-255*/
713 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
714 return -ERANGE;
715
716 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
717 tf->lbal = sect;
718 tf->lbam = cyl;
719 tf->lbah = cyl >> 8;
720 tf->device |= head;
721 }
722
723 return 0;
724}
725
cb95d562
TH
726/**
727 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
728 * @pio_mask: pio_mask
729 * @mwdma_mask: mwdma_mask
730 * @udma_mask: udma_mask
731 *
732 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
733 * unsigned int xfer_mask.
734 *
735 * LOCKING:
736 * None.
737 *
738 * RETURNS:
739 * Packed xfer_mask.
740 */
7dc951ae
TH
741unsigned long ata_pack_xfermask(unsigned long pio_mask,
742 unsigned long mwdma_mask,
743 unsigned long udma_mask)
cb95d562
TH
744{
745 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
746 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
747 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
748}
749
c0489e4e
TH
750/**
751 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
752 * @xfer_mask: xfer_mask to unpack
753 * @pio_mask: resulting pio_mask
754 * @mwdma_mask: resulting mwdma_mask
755 * @udma_mask: resulting udma_mask
756 *
757 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
758 * Any NULL distination masks will be ignored.
759 */
7dc951ae
TH
760void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
761 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
762{
763 if (pio_mask)
764 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
765 if (mwdma_mask)
766 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
767 if (udma_mask)
768 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
769}
770
cb95d562 771static const struct ata_xfer_ent {
be9a50c8 772 int shift, bits;
cb95d562
TH
773 u8 base;
774} ata_xfer_tbl[] = {
70cd071e
TH
775 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
776 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
777 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
778 { -1, },
779};
780
781/**
782 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
783 * @xfer_mask: xfer_mask of interest
784 *
785 * Return matching XFER_* value for @xfer_mask. Only the highest
786 * bit of @xfer_mask is considered.
787 *
788 * LOCKING:
789 * None.
790 *
791 * RETURNS:
70cd071e 792 * Matching XFER_* value, 0xff if no match found.
cb95d562 793 */
7dc951ae 794u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
795{
796 int highbit = fls(xfer_mask) - 1;
797 const struct ata_xfer_ent *ent;
798
799 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
800 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
801 return ent->base + highbit - ent->shift;
70cd071e 802 return 0xff;
cb95d562
TH
803}
804
805/**
806 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
807 * @xfer_mode: XFER_* of interest
808 *
809 * Return matching xfer_mask for @xfer_mode.
810 *
811 * LOCKING:
812 * None.
813 *
814 * RETURNS:
815 * Matching xfer_mask, 0 if no match found.
816 */
7dc951ae 817unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
818{
819 const struct ata_xfer_ent *ent;
820
821 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
822 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
823 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
824 & ~((1 << ent->shift) - 1);
cb95d562
TH
825 return 0;
826}
827
828/**
829 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
830 * @xfer_mode: XFER_* of interest
831 *
832 * Return matching xfer_shift for @xfer_mode.
833 *
834 * LOCKING:
835 * None.
836 *
837 * RETURNS:
838 * Matching xfer_shift, -1 if no match found.
839 */
7dc951ae 840int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
841{
842 const struct ata_xfer_ent *ent;
843
844 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
845 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
846 return ent->shift;
847 return -1;
848}
849
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* index matches the bit position in the packed xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
895
4c360c81
TH
/* human-readable name for a SATA link speed number (1-based) */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || spd - 1 >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
907
3373efd8 908void ata_dev_disable(struct ata_device *dev)
0b8efb0a 909{
09d7f9b0 910 if (ata_dev_enabled(dev)) {
9af5c9c9 911 if (ata_msg_drv(dev->link->ap))
09d7f9b0 912 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
562f0c2d 913 ata_acpi_on_disable(dev);
4ae72a1e
TH
914 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
915 ATA_DNXFER_QUIET);
0b8efb0a
TH
916 dev->class++;
917 }
918}
919
ca77329f
KCA
920static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
921{
922 struct ata_link *link = dev->link;
923 struct ata_port *ap = link->ap;
924 u32 scontrol;
925 unsigned int err_mask;
926 int rc;
927
928 /*
929 * disallow DIPM for drivers which haven't set
930 * ATA_FLAG_IPM. This is because when DIPM is enabled,
931 * phy ready will be set in the interrupt status on
932 * state changes, which will cause some drivers to
933 * think there are errors - additionally drivers will
934 * need to disable hot plug.
935 */
936 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
937 ap->pm_policy = NOT_AVAILABLE;
938 return -EINVAL;
939 }
940
941 /*
942 * For DIPM, we will only enable it for the
943 * min_power setting.
944 *
945 * Why? Because Disks are too stupid to know that
946 * If the host rejects a request to go to SLUMBER
947 * they should retry at PARTIAL, and instead it
948 * just would give up. So, for medium_power to
949 * work at all, we need to only allow HIPM.
950 */
951 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
952 if (rc)
953 return rc;
954
955 switch (policy) {
956 case MIN_POWER:
957 /* no restrictions on IPM transitions */
958 scontrol &= ~(0x3 << 8);
959 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
960 if (rc)
961 return rc;
962
963 /* enable DIPM */
964 if (dev->flags & ATA_DFLAG_DIPM)
965 err_mask = ata_dev_set_feature(dev,
966 SETFEATURES_SATA_ENABLE, SATA_DIPM);
967 break;
968 case MEDIUM_POWER:
969 /* allow IPM to PARTIAL */
970 scontrol &= ~(0x1 << 8);
971 scontrol |= (0x2 << 8);
972 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
973 if (rc)
974 return rc;
975
f5456b63
KCA
976 /*
977 * we don't have to disable DIPM since IPM flags
978 * disallow transitions to SLUMBER, which effectively
979 * disable DIPM if it does not support PARTIAL
980 */
ca77329f
KCA
981 break;
982 case NOT_AVAILABLE:
983 case MAX_PERFORMANCE:
984 /* disable all IPM transitions */
985 scontrol |= (0x3 << 8);
986 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
987 if (rc)
988 return rc;
989
f5456b63
KCA
990 /*
991 * we don't have to disable DIPM since IPM flags
992 * disallow all transitions which effectively
993 * disable DIPM anyway.
994 */
ca77329f
KCA
995 break;
996 }
997
998 /* FIXME: handle SET FEATURES failure */
999 (void) err_mask;
1000
1001 return 0;
1002}
1003
1004/**
1005 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
1006 * @dev: device to enable power management
1007 * @policy: the link power management policy
ca77329f
KCA
1008 *
1009 * Enable SATA Interface power management. This will enable
1010 * Device Interface Power Management (DIPM) for min_power
1011 * policy, and then call driver specific callbacks for
1012 * enabling Host Initiated Power management.
1013 *
1014 * Locking: Caller.
1015 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
1016 */
1017void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
1018{
1019 int rc = 0;
1020 struct ata_port *ap = dev->link->ap;
1021
1022 /* set HIPM first, then DIPM */
1023 if (ap->ops->enable_pm)
1024 rc = ap->ops->enable_pm(ap, policy);
1025 if (rc)
1026 goto enable_pm_out;
1027 rc = ata_dev_set_dipm(dev, policy);
1028
1029enable_pm_out:
1030 if (rc)
1031 ap->pm_policy = MAX_PERFORMANCE;
1032 else
1033 ap->pm_policy = policy;
1034 return /* rc */; /* hopefully we can use 'rc' eventually */
1035}
1036
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* device side first, then the host controller */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
ca77329f
KCA
1059
1060void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1061{
1062 ap->pm_policy = policy;
3ec25ebd 1063 ap->link.eh_info.action |= ATA_EH_LPM;
ca77329f
KCA
1064 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1065 ata_port_schedule_eh(ap);
1066}
1067
#ifdef CONFIG_PM
/*
 * Walk every device on every link of every port of @host and turn
 * its interface power management off.  NOTE(review): despite the
 * name, this *disables* per-device PM - presumably named from the
 * suspend path's perspective; confirm against callers.
 */
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

/* schedule EH on every port to re-apply its stored pm_policy */
static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */
ca77329f 1095
1da177e4
LT
1096/**
1097 * ata_dev_classify - determine device type based on ATA-spec signature
1098 * @tf: ATA taskfile register set for device to be identified
1099 *
1100 * Determine from taskfile register contents whether a device is
1101 * ATA or ATAPI, as per "Signature and persistence" section
1102 * of ATA/PI spec (volume 1, sect 5.14).
1103 *
1104 * LOCKING:
1105 * None.
1106 *
1107 * RETURNS:
633273a3
TH
1108 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1109 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1110 */
057ace5e 1111unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1112{
1113 /* Apple's open source Darwin code hints that some devices only
1114 * put a proper signature into the LBA mid/high registers,
1115 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1116 *
1117 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1118 * signatures for ATA and ATAPI devices attached on SerialATA,
1119 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1120 * spec has never mentioned about using different signatures
1121 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1122 * Multiplier specification began to use 0x69/0x96 to identify
1123 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1124 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1125 * 0x69/0x96 shortly and described them as reserved for
1126 * SerialATA.
1127 *
1128 * We follow the current spec and consider that 0x69/0x96
1129 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 1130 */
633273a3 1131 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1132 DPRINTK("found ATA device by sig\n");
1133 return ATA_DEV_ATA;
1134 }
1135
633273a3 1136 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1137 DPRINTK("found ATAPI device by sig\n");
1138 return ATA_DEV_ATAPI;
1139 }
1140
633273a3
TH
1141 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1142 DPRINTK("found PMP device by sig\n");
1143 return ATA_DEV_PMP;
1144 }
1145
1146 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 1147 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
1148 return ATA_DEV_SEMB_UNSUP; /* not yet */
1149 }
1150
1da177e4
LT
1151 DPRINTK("unknown device\n");
1152 return ATA_DEV_UNKNOWN;
1153}
1154
1da177e4 1155/**
6a62a04d 1156 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1157 * @id: IDENTIFY DEVICE results we will examine
1158 * @s: string into which data is output
1159 * @ofs: offset into identify device page
1160 * @len: length of string to return. must be an even number.
1161 *
1162 * The strings in the IDENTIFY DEVICE page are broken up into
1163 * 16-bit chunks. Run through the string, and output each
1164 * 8-bit chunk linearly, regardless of platform.
1165 *
1166 * LOCKING:
1167 * caller.
1168 */
1169
6a62a04d
TH
1170void ata_id_string(const u16 *id, unsigned char *s,
1171 unsigned int ofs, unsigned int len)
1da177e4
LT
1172{
1173 unsigned int c;
1174
963e4975
AC
1175 BUG_ON(len & 1);
1176
1da177e4
LT
1177 while (len > 0) {
1178 c = id[ofs] >> 8;
1179 *s = c;
1180 s++;
1181
1182 c = id[ofs] & 0xff;
1183 *s = c;
1184 s++;
1185
1186 ofs++;
1187 len -= 2;
1188 }
1189}
1190
0e949ff3 1191/**
6a62a04d 1192 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1193 * @id: IDENTIFY DEVICE results we will examine
1194 * @s: string into which data is output
1195 * @ofs: offset into identify device page
1196 * @len: length of string to return. must be an odd number.
1197 *
6a62a04d 1198 * This function is identical to ata_id_string except that it
0e949ff3
TH
1199 * trims trailing spaces and terminates the resulting string with
1200 * null. @len must be actual maximum length (even number) + 1.
1201 *
1202 * LOCKING:
1203 * caller.
1204 */
6a62a04d
TH
1205void ata_id_c_string(const u16 *id, unsigned char *s,
1206 unsigned int ofs, unsigned int len)
0e949ff3
TH
1207{
1208 unsigned char *p;
1209
6a62a04d 1210 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1211
1212 p = s + strnlen(s, len - 1);
1213 while (p > s && p[-1] == ' ')
1214 p--;
1215 *p = '\0';
1216}
0baab86b 1217
db6f8759
TH
1218static u64 ata_id_n_sectors(const u16 *id)
1219{
1220 if (ata_id_has_lba(id)) {
1221 if (ata_id_has_lba48(id))
1222 return ata_id_u64(id, 100);
1223 else
1224 return ata_id_u32(id, 60);
1225 } else {
1226 if (ata_id_current_chs_valid(id))
1227 return ata_id_u32(id, 57);
1228 else
1229 return id[1] * id[3] * id[6];
1230 }
1231}
1232
a5987e0a 1233u64 ata_tf_to_lba48(const struct ata_taskfile *tf)
1e999736
AC
1234{
1235 u64 sectors = 0;
1236
1237 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1238 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1239 sectors |= (tf->hob_lbal & 0xff) << 24;
1240 sectors |= (tf->lbah & 0xff) << 16;
1241 sectors |= (tf->lbam & 0xff) << 8;
1242 sectors |= (tf->lbal & 0xff);
1243
a5987e0a 1244 return sectors;
1e999736
AC
1245}
1246
a5987e0a 1247u64 ata_tf_to_lba(const struct ata_taskfile *tf)
1e999736
AC
1248{
1249 u64 sectors = 0;
1250
1251 sectors |= (tf->device & 0x0f) << 24;
1252 sectors |= (tf->lbah & 0xff) << 16;
1253 sectors |= (tf->lbam & 0xff) << 8;
1254 sectors |= (tf->lbal & 0xff);
1255
a5987e0a 1256 return sectors;
1e999736
AC
1257}
1258
1259/**
c728a914
TH
1260 * ata_read_native_max_address - Read native max address
1261 * @dev: target device
1262 * @max_sectors: out parameter for the result native max address
1e999736 1263 *
c728a914
TH
1264 * Perform an LBA48 or LBA28 native size query upon the device in
1265 * question.
1e999736 1266 *
c728a914
TH
1267 * RETURNS:
1268 * 0 on success, -EACCES if command is aborted by the drive.
1269 * -EIO on other errors.
1e999736 1270 */
c728a914 1271static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1272{
c728a914 1273 unsigned int err_mask;
1e999736 1274 struct ata_taskfile tf;
c728a914 1275 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1276
1277 ata_tf_init(dev, &tf);
1278
c728a914 1279 /* always clear all address registers */
1e999736 1280 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1281
c728a914
TH
1282 if (lba48) {
1283 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1284 tf.flags |= ATA_TFLAG_LBA48;
1285 } else
1286 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1287
1e999736 1288 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1289 tf.device |= ATA_LBA;
1290
2b789108 1291 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1292 if (err_mask) {
1293 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1294 "max address (err_mask=0x%x)\n", err_mask);
1295 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1296 return -EACCES;
1297 return -EIO;
1298 }
1e999736 1299
c728a914 1300 if (lba48)
a5987e0a 1301 *max_sectors = ata_tf_to_lba48(&tf) + 1;
c728a914 1302 else
a5987e0a 1303 *max_sectors = ata_tf_to_lba(&tf) + 1;
2dcb407e 1304 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1305 (*max_sectors)--;
c728a914 1306 return 0;
1e999736
AC
1307}
1308
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors using SET MAX (EXT).
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the highest addressable sector, which is
	 * sector count minus one.
	 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* bits 24-47 go into the high-order (hob) registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 24-27 live in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	/* bits 0-23 are common to both command flavors */
	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1365
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it?  Only ATA disks with LBA, an enabled
	 * HPA, and no broken-HPA quirk qualify.
	 */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			/* remember the quirk so we never retry on this dev */
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  Either no HPA is in effect or the user did
	 * not ask for it to be ignored; just report what we found.
	 */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so the new size is picked up */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1461
1da177e4
LT
1462/**
1463 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1464 * @id: IDENTIFY DEVICE page to dump
1da177e4 1465 *
0bd3300a
TH
1466 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1467 * page.
1da177e4
LT
1468 *
1469 * LOCKING:
1470 * caller.
1471 */
1472
0bd3300a 1473static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1474{
1475 DPRINTK("49==0x%04x "
1476 "53==0x%04x "
1477 "63==0x%04x "
1478 "64==0x%04x "
1479 "75==0x%04x \n",
0bd3300a
TH
1480 id[49],
1481 id[53],
1482 id[63],
1483 id[64],
1484 id[75]);
1da177e4
LT
1485 DPRINTK("80==0x%04x "
1486 "81==0x%04x "
1487 "82==0x%04x "
1488 "83==0x%04x "
1489 "84==0x%04x \n",
0bd3300a
TH
1490 id[80],
1491 id[81],
1492 id[82],
1493 id[83],
1494 id[84]);
1da177e4
LT
1495 DPRINTK("88==0x%04x "
1496 "93==0x%04x\n",
0bd3300a
TH
1497 id[88],
1498 id[93]);
1da177e4
LT
1499}
1500
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 advertises PIO3/4; modes 0-2 are always set */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes: word 163
		 * carries advanced PIO (bits 0-2) and MWDMA (bits 3-5)
		 * mode numbers.
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* UDMA modes are only valid when word 53 bit 2 says word 88 is */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1569
/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for the port_task function to use
 *	@delay: delay time in msecs for workqueue function
 *
 *	Schedule the port_task function for execution after @delay
 *	msecs.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_pio_queue_task(struct ata_port *ap, void *data, unsigned long delay)
{
	/* stash the argument where the port_task function will find it */
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, msecs_to_jiffies(delay));
}
1596
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancel the delayed work and wait for a running instance */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}
1616
7102d230 1617static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1618{
77853bf2 1619 struct completion *waiting = qc->private_data;
a2a7a662 1620
a2a7a662 1621 complete(waiting);
a2a7a662
TH
1622}
1623
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	int auto_timeout = 0;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must be free here; anything else is a bug */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear per-link/per-port command state so the
	 * internal command runs alone; restored before returning.
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of all sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	/* pick a timeout: explicit @timeout wins, then the module-wide
	 * ata_probe_timeout, then the per-command default.
	 */
	if (!timeout) {
		if (ata_probe_timeout)
			timeout = ata_probe_timeout * 1000;
		else {
			timeout = ata_internal_cmd_timeout(dev, command);
			auto_timeout = 1;
		}
	}

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* auto-chosen timeout expired: let the timeout bookkeeping know */
	if ((err_mask & AC_ERR_TIMEOUT) && auto_timeout)
		ata_internal_cmd_timed_out(dev, command);

	return err_mask;
}
1815
2432697b 1816/**
33480a0e 1817 * ata_exec_internal - execute libata internal command
2432697b
TH
1818 * @dev: Device to which the command is sent
1819 * @tf: Taskfile registers for the command and the result
1820 * @cdb: CDB for packet command
1821 * @dma_dir: Data tranfer direction of the command
1822 * @buf: Data buffer of the command
1823 * @buflen: Length of data buffer
2b789108 1824 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1825 *
1826 * Wrapper around ata_exec_internal_sg() which takes simple
1827 * buffer instead of sg list.
1828 *
1829 * LOCKING:
1830 * None. Should be called with kernel context, might sleep.
1831 *
1832 * RETURNS:
1833 * Zero on success, AC_ERR_* mask on failure
1834 */
1835unsigned ata_exec_internal(struct ata_device *dev,
1836 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1837 int dma_dir, void *buf, unsigned int buflen,
1838 unsigned long timeout)
2432697b 1839{
33480a0e
TH
1840 struct scatterlist *psg = NULL, sg;
1841 unsigned int n_elem = 0;
2432697b 1842
33480a0e
TH
1843 if (dma_dir != DMA_NONE) {
1844 WARN_ON(!buf);
1845 sg_init_one(&sg, buf, buflen);
1846 psg = &sg;
1847 n_elem++;
1848 }
2432697b 1849
2b789108
TH
1850 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1851 timeout);
2432697b
TH
1852}
1853
977e6b9f
TH
1854/**
1855 * ata_do_simple_cmd - execute simple internal command
1856 * @dev: Device to which the command is sent
1857 * @cmd: Opcode to execute
1858 *
1859 * Execute a 'simple' command, that only consists of the opcode
1860 * 'cmd' itself, without filling any other registers
1861 *
1862 * LOCKING:
1863 * Kernel thread context (may sleep).
1864 *
1865 * RETURNS:
1866 * Zero on success, AC_ERR_* mask on failure
e58eb583 1867 */
77b08fb5 1868unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1869{
1870 struct ata_taskfile tf;
e58eb583
TH
1871
1872 ata_tf_init(dev, &tf);
1873
1874 tf.command = cmd;
1875 tf.flags |= ATA_TFLAG_DEVICE;
1876 tf.protocol = ATA_PROT_NODATA;
1877
2b789108 1878 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1879}
1880
1bc4ccff
AC
1881/**
1882 * ata_pio_need_iordy - check if iordy needed
1883 * @adev: ATA device
1884 *
1885 * Check if the current speed of the device requires IORDY. Used
1886 * by various controllers for chip configuration.
1887 */
a617c09f 1888
1bc4ccff
AC
1889unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1890{
432729f0
AC
1891 /* Controller doesn't support IORDY. Probably a pointless check
1892 as the caller should know this */
9af5c9c9 1893 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1894 return 0;
432729f0
AC
1895 /* PIO3 and higher it is mandatory */
1896 if (adev->pio_mode > XFER_PIO_2)
1897 return 1;
1898 /* We turn it on when possible */
1899 if (ata_id_has_iordy(adev->id))
1bc4ccff 1900 return 1;
432729f0
AC
1901 return 0;
1902}
2e9edbf8 1903
432729f0
AC
1904/**
1905 * ata_pio_mask_no_iordy - Return the non IORDY mask
1906 * @adev: ATA device
1907 *
1908 * Compute the highest mode possible if we are not using iordy. Return
1909 * -1 if no iordy mode is available.
1910 */
a617c09f 1911
432729f0
AC
1912static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1913{
1bc4ccff 1914 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1915 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1916 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1917 /* Is the speed faster than the drive allows non IORDY ? */
1918 if (pio) {
1919 /* This is cycle times not frequency - watch the logic! */
1920 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1921 return 3 << ATA_SHIFT_PIO;
1922 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1923 }
1924 }
432729f0 1925 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1926}
1927
963e4975
AC
1928/**
1929 * ata_do_dev_read_id - default ID read method
1930 * @dev: device
1931 * @tf: proposed taskfile
1932 * @id: data buffer
1933 *
1934 * Issue the identify taskfile and hand back the buffer containing
1935 * identify data. For some RAID controllers and for pre ATA devices
1936 * this function is wrapped or replaced by the driver
1937 */
1938unsigned int ata_do_dev_read_id(struct ata_device *dev,
1939 struct ata_taskfile *tf, u16 *id)
1940{
1941 return ata_exec_internal(dev, tf, NULL, DMA_FROM_DEVICE,
1942 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
1943}
1944
1da177e4 1945/**
49016aca 1946 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1947 * @dev: target device
1948 * @p_class: pointer to class of the target device (may be changed)
bff04647 1949 * @flags: ATA_READID_* flags
fe635c7e 1950 * @id: buffer to read IDENTIFY data into
1da177e4 1951 *
49016aca
TH
1952 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1953 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1954 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1955 * for pre-ATA4 drives.
1da177e4 1956 *
50a99018 1957 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1958 * now we abort if we hit that case.
50a99018 1959 *
1da177e4 1960 * LOCKING:
49016aca
TH
1961 * Kernel thread context (may sleep)
1962 *
1963 * RETURNS:
1964 * 0 on success, -errno otherwise.
1da177e4 1965 */
a9beec95 1966int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1967 unsigned int flags, u16 *id)
1da177e4 1968{
9af5c9c9 1969 struct ata_port *ap = dev->link->ap;
49016aca 1970 unsigned int class = *p_class;
a0123703 1971 struct ata_taskfile tf;
49016aca
TH
1972 unsigned int err_mask = 0;
1973 const char *reason;
54936f8b 1974 int may_fallback = 1, tried_spinup = 0;
49016aca 1975 int rc;
1da177e4 1976
0dd4b21f 1977 if (ata_msg_ctl(ap))
7f5e4e8d 1978 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1da177e4 1979
963e4975 1980retry:
3373efd8 1981 ata_tf_init(dev, &tf);
a0123703 1982
49016aca
TH
1983 switch (class) {
1984 case ATA_DEV_ATA:
a0123703 1985 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1986 break;
1987 case ATA_DEV_ATAPI:
a0123703 1988 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1989 break;
1990 default:
1991 rc = -ENODEV;
1992 reason = "unsupported class";
1993 goto err_out;
1da177e4
LT
1994 }
1995
a0123703 1996 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1997
1998 /* Some devices choke if TF registers contain garbage. Make
1999 * sure those are properly initialized.
2000 */
2001 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2002
2003 /* Device presence detection is unreliable on some
2004 * controllers. Always poll IDENTIFY if available.
2005 */
2006 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 2007
963e4975
AC
2008 if (ap->ops->read_id)
2009 err_mask = ap->ops->read_id(dev, &tf, id);
2010 else
2011 err_mask = ata_do_dev_read_id(dev, &tf, id);
2012
a0123703 2013 if (err_mask) {
800b3996 2014 if (err_mask & AC_ERR_NODEV_HINT) {
1ffc151f
TH
2015 ata_dev_printk(dev, KERN_DEBUG,
2016 "NODEV after polling detection\n");
55a8e2c8
TH
2017 return -ENOENT;
2018 }
2019
1ffc151f
TH
2020 if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
2021 /* Device or controller might have reported
2022 * the wrong device class. Give a shot at the
2023 * other IDENTIFY if the current one is
2024 * aborted by the device.
2025 */
2026 if (may_fallback) {
2027 may_fallback = 0;
2028
2029 if (class == ATA_DEV_ATA)
2030 class = ATA_DEV_ATAPI;
2031 else
2032 class = ATA_DEV_ATA;
2033 goto retry;
2034 }
2035
2036 /* Control reaches here iff the device aborted
2037 * both flavors of IDENTIFYs which happens
2038 * sometimes with phantom devices.
2039 */
2040 ata_dev_printk(dev, KERN_DEBUG,
2041 "both IDENTIFYs aborted, assuming NODEV\n");
2042 return -ENOENT;
54936f8b
TH
2043 }
2044
49016aca
TH
2045 rc = -EIO;
2046 reason = "I/O error";
1da177e4
LT
2047 goto err_out;
2048 }
2049
54936f8b
TH
2050 /* Falling back doesn't make sense if ID data was read
2051 * successfully at least once.
2052 */
2053 may_fallback = 0;
2054
49016aca 2055 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 2056
49016aca 2057 /* sanity check */
a4f5749b 2058 rc = -EINVAL;
6070068b 2059 reason = "device reports invalid type";
a4f5749b
TH
2060
2061 if (class == ATA_DEV_ATA) {
2062 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
2063 goto err_out;
2064 } else {
2065 if (ata_id_is_ata(id))
2066 goto err_out;
49016aca
TH
2067 }
2068
169439c2
ML
2069 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
2070 tried_spinup = 1;
2071 /*
2072 * Drive powered-up in standby mode, and requires a specific
2073 * SET_FEATURES spin-up subcommand before it will accept
2074 * anything other than the original IDENTIFY command.
2075 */
218f3d30 2076 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 2077 if (err_mask && id[2] != 0x738c) {
169439c2
ML
2078 rc = -EIO;
2079 reason = "SPINUP failed";
2080 goto err_out;
2081 }
2082 /*
2083 * If the drive initially returned incomplete IDENTIFY info,
2084 * we now must reissue the IDENTIFY command.
2085 */
2086 if (id[2] == 0x37c8)
2087 goto retry;
2088 }
2089
bff04647 2090 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
2091 /*
2092 * The exact sequence expected by certain pre-ATA4 drives is:
2093 * SRST RESET
50a99018
AC
2094 * IDENTIFY (optional in early ATA)
2095 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2096 * anything else..
2097 * Some drives were very specific about that exact sequence.
50a99018
AC
2098 *
2099 * Note that ATA4 says lba is mandatory so the second check
2100 * shoud never trigger.
49016aca
TH
2101 */
2102 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2103 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2104 if (err_mask) {
2105 rc = -EIO;
2106 reason = "INIT_DEV_PARAMS failed";
2107 goto err_out;
2108 }
2109
2110 /* current CHS translation info (id[53-58]) might be
2111 * changed. reread the identify device info.
2112 */
bff04647 2113 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2114 goto retry;
2115 }
2116 }
2117
2118 *p_class = class;
fe635c7e 2119
49016aca
TH
2120 return 0;
2121
2122 err_out:
88574551 2123 if (ata_msg_warn(ap))
0dd4b21f 2124 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2125 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2126 return rc;
2127}
2128
3373efd8 2129static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2130{
9af5c9c9
TH
2131 struct ata_port *ap = dev->link->ap;
2132 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2133}
2134
a6e6ce8e
TH
2135static void ata_dev_config_ncq(struct ata_device *dev,
2136 char *desc, size_t desc_sz)
2137{
9af5c9c9 2138 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2139 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2140
2141 if (!ata_id_has_ncq(dev->id)) {
2142 desc[0] = '\0';
2143 return;
2144 }
75683fe7 2145 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2146 snprintf(desc, desc_sz, "NCQ (not used)");
2147 return;
2148 }
a6e6ce8e 2149 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2150 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2151 dev->flags |= ATA_DFLAG_NCQ;
2152 }
2153
2154 if (hdepth >= ddepth)
2155 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2156 else
2157 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2158}
2159
49016aca 2160/**
ffeae418 2161 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2162 * @dev: Target device to configure
2163 *
2164 * Configure @dev according to @dev->id. Generic and low-level
2165 * driver specific fixups are also applied.
49016aca
TH
2166 *
2167 * LOCKING:
ffeae418
TH
2168 * Kernel thread context (may sleep)
2169 *
2170 * RETURNS:
2171 * 0 on success, -errno otherwise
49016aca 2172 */
efdaedc4 2173int ata_dev_configure(struct ata_device *dev)
49016aca 2174{
9af5c9c9
TH
2175 struct ata_port *ap = dev->link->ap;
2176 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2177 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2178 const u16 *id = dev->id;
7dc951ae 2179 unsigned long xfer_mask;
b352e57d 2180 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2181 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2182 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2183 int rc;
49016aca 2184
0dd4b21f 2185 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e 2186 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
7f5e4e8d 2187 __func__);
ffeae418 2188 return 0;
49016aca
TH
2189 }
2190
0dd4b21f 2191 if (ata_msg_probe(ap))
7f5e4e8d 2192 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);
1da177e4 2193
75683fe7
TH
2194 /* set horkage */
2195 dev->horkage |= ata_dev_blacklisted(dev);
33267325 2196 ata_force_horkage(dev);
75683fe7 2197
50af2fa1
TH
2198 if (dev->horkage & ATA_HORKAGE_DISABLE) {
2199 ata_dev_printk(dev, KERN_INFO,
2200 "unsupported device, disabling\n");
2201 ata_dev_disable(dev);
2202 return 0;
2203 }
2204
2486fa56
TH
2205 if ((!atapi_enabled || (ap->flags & ATA_FLAG_NO_ATAPI)) &&
2206 dev->class == ATA_DEV_ATAPI) {
2207 ata_dev_printk(dev, KERN_WARNING,
2208 "WARNING: ATAPI is %s, device ignored.\n",
2209 atapi_enabled ? "not supported with this driver"
2210 : "disabled");
2211 ata_dev_disable(dev);
2212 return 0;
2213 }
2214
6746544c
TH
2215 /* let ACPI work its magic */
2216 rc = ata_acpi_on_devcfg(dev);
2217 if (rc)
2218 return rc;
08573a86 2219
05027adc
TH
2220 /* massage HPA, do it early as it might change IDENTIFY data */
2221 rc = ata_hpa_resize(dev);
2222 if (rc)
2223 return rc;
2224
c39f5ebe 2225 /* print device capabilities */
0dd4b21f 2226 if (ata_msg_probe(ap))
88574551
TH
2227 ata_dev_printk(dev, KERN_DEBUG,
2228 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2229 "85:%04x 86:%04x 87:%04x 88:%04x\n",
7f5e4e8d 2230 __func__,
f15a1daf
TH
2231 id[49], id[82], id[83], id[84],
2232 id[85], id[86], id[87], id[88]);
c39f5ebe 2233
208a9933 2234 /* initialize to-be-configured parameters */
ea1dd4e1 2235 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2236 dev->max_sectors = 0;
2237 dev->cdb_len = 0;
2238 dev->n_sectors = 0;
2239 dev->cylinders = 0;
2240 dev->heads = 0;
2241 dev->sectors = 0;
2242
1da177e4
LT
2243 /*
2244 * common ATA, ATAPI feature tests
2245 */
2246
ff8854b2 2247 /* find max transfer mode; for printk only */
1148c3a7 2248 xfer_mask = ata_id_xfermask(id);
1da177e4 2249
0dd4b21f
BP
2250 if (ata_msg_probe(ap))
2251 ata_dump_id(id);
1da177e4 2252
ef143d57
AL
2253 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2254 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2255 sizeof(fwrevbuf));
2256
2257 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2258 sizeof(modelbuf));
2259
1da177e4
LT
2260 /* ATA-specific feature tests */
2261 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
2262 if (ata_id_is_cfa(id)) {
2263 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
2264 ata_dev_printk(dev, KERN_WARNING,
2265 "supports DRM functions and may "
2266 "not be fully accessable.\n");
b352e57d 2267 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2268 } else {
2dcb407e 2269 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2270 /* Warn the user if the device has TPM extensions */
2271 if (ata_id_has_tpm(id))
2272 ata_dev_printk(dev, KERN_WARNING,
2273 "supports DRM functions and may "
2274 "not be fully accessable.\n");
2275 }
b352e57d 2276
1148c3a7 2277 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2278
3f64f565
EM
2279 if (dev->id[59] & 0x100)
2280 dev->multi_count = dev->id[59] & 0xff;
2281
1148c3a7 2282 if (ata_id_has_lba(id)) {
4c2d721a 2283 const char *lba_desc;
a6e6ce8e 2284 char ncq_desc[20];
8bf62ece 2285
4c2d721a
TH
2286 lba_desc = "LBA";
2287 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2288 if (ata_id_has_lba48(id)) {
8bf62ece 2289 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2290 lba_desc = "LBA48";
6fc49adb
TH
2291
2292 if (dev->n_sectors >= (1UL << 28) &&
2293 ata_id_has_flush_ext(id))
2294 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2295 }
8bf62ece 2296
a6e6ce8e
TH
2297 /* config NCQ */
2298 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2299
8bf62ece 2300 /* print device info to dmesg */
3f64f565
EM
2301 if (ata_msg_drv(ap) && print_info) {
2302 ata_dev_printk(dev, KERN_INFO,
2303 "%s: %s, %s, max %s\n",
2304 revbuf, modelbuf, fwrevbuf,
2305 ata_mode_string(xfer_mask));
2306 ata_dev_printk(dev, KERN_INFO,
2307 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 2308 (unsigned long long)dev->n_sectors,
3f64f565
EM
2309 dev->multi_count, lba_desc, ncq_desc);
2310 }
ffeae418 2311 } else {
8bf62ece
AL
2312 /* CHS */
2313
2314 /* Default translation */
1148c3a7
TH
2315 dev->cylinders = id[1];
2316 dev->heads = id[3];
2317 dev->sectors = id[6];
8bf62ece 2318
1148c3a7 2319 if (ata_id_current_chs_valid(id)) {
8bf62ece 2320 /* Current CHS translation is valid. */
1148c3a7
TH
2321 dev->cylinders = id[54];
2322 dev->heads = id[55];
2323 dev->sectors = id[56];
8bf62ece
AL
2324 }
2325
2326 /* print device info to dmesg */
3f64f565 2327 if (ata_msg_drv(ap) && print_info) {
88574551 2328 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2329 "%s: %s, %s, max %s\n",
2330 revbuf, modelbuf, fwrevbuf,
2331 ata_mode_string(xfer_mask));
a84471fe 2332 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2333 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2334 (unsigned long long)dev->n_sectors,
2335 dev->multi_count, dev->cylinders,
2336 dev->heads, dev->sectors);
2337 }
07f6f7d0
AL
2338 }
2339
6e7846e9 2340 dev->cdb_len = 16;
1da177e4
LT
2341 }
2342
2343 /* ATAPI-specific feature tests */
2c13b7ce 2344 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2345 const char *cdb_intr_string = "";
2346 const char *atapi_an_string = "";
91163006 2347 const char *dma_dir_string = "";
7d77b247 2348 u32 sntf;
08a556db 2349
1148c3a7 2350 rc = atapi_cdb_len(id);
1da177e4 2351 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2352 if (ata_msg_warn(ap))
88574551
TH
2353 ata_dev_printk(dev, KERN_WARNING,
2354 "unsupported CDB len\n");
ffeae418 2355 rc = -EINVAL;
1da177e4
LT
2356 goto err_out_nosup;
2357 }
6e7846e9 2358 dev->cdb_len = (unsigned int) rc;
1da177e4 2359
7d77b247
TH
2360 /* Enable ATAPI AN if both the host and device have
2361 * the support. If PMP is attached, SNTF is required
2362 * to enable ATAPI AN to discern between PHY status
2363 * changed notifications and ATAPI ANs.
9f45cbd3 2364 */
7d77b247 2365 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
071f44b1 2366 (!sata_pmp_attached(ap) ||
7d77b247 2367 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2368 unsigned int err_mask;
2369
9f45cbd3 2370 /* issue SET feature command to turn this on */
218f3d30
JG
2371 err_mask = ata_dev_set_feature(dev,
2372 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2373 if (err_mask)
9f45cbd3 2374 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2375 "failed to enable ATAPI AN "
2376 "(err_mask=0x%x)\n", err_mask);
2377 else {
9f45cbd3 2378 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2379 atapi_an_string = ", ATAPI AN";
2380 }
9f45cbd3
KCA
2381 }
2382
08a556db 2383 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2384 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2385 cdb_intr_string = ", CDB intr";
2386 }
312f7da2 2387
91163006
TH
2388 if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
2389 dev->flags |= ATA_DFLAG_DMADIR;
2390 dma_dir_string = ", DMADIR";
2391 }
2392
1da177e4 2393 /* print device info to dmesg */
5afc8142 2394 if (ata_msg_drv(ap) && print_info)
ef143d57 2395 ata_dev_printk(dev, KERN_INFO,
91163006 2396 "ATAPI: %s, %s, max %s%s%s%s\n",
ef143d57 2397 modelbuf, fwrevbuf,
12436c30 2398 ata_mode_string(xfer_mask),
91163006
TH
2399 cdb_intr_string, atapi_an_string,
2400 dma_dir_string);
1da177e4
LT
2401 }
2402
914ed354
TH
2403 /* determine max_sectors */
2404 dev->max_sectors = ATA_MAX_SECTORS;
2405 if (dev->flags & ATA_DFLAG_LBA48)
2406 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2407
ca77329f
KCA
2408 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2409 if (ata_id_has_hipm(dev->id))
2410 dev->flags |= ATA_DFLAG_HIPM;
2411 if (ata_id_has_dipm(dev->id))
2412 dev->flags |= ATA_DFLAG_DIPM;
2413 }
2414
c5038fc0
AC
2415 /* Limit PATA drive on SATA cable bridge transfers to udma5,
2416 200 sectors */
3373efd8 2417 if (ata_dev_knobble(dev)) {
5afc8142 2418 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2419 ata_dev_printk(dev, KERN_INFO,
2420 "applying bridge limits\n");
5a529139 2421 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2422 dev->max_sectors = ATA_MAX_SECTORS;
2423 }
2424
f8d8e579 2425 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2426 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2427 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2428 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2429 }
f8d8e579 2430
75683fe7 2431 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2432 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2433 dev->max_sectors);
18d6e9d5 2434
ca77329f
KCA
2435 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2436 dev->horkage |= ATA_HORKAGE_IPM;
2437
2438 /* reset link pm_policy for this port to no pm */
2439 ap->pm_policy = MAX_PERFORMANCE;
2440 }
2441
4b2f3ede 2442 if (ap->ops->dev_config)
cd0d3bbc 2443 ap->ops->dev_config(dev);
4b2f3ede 2444
c5038fc0
AC
2445 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2446 /* Let the user know. We don't want to disallow opens for
2447 rescue purposes, or in case the vendor is just a blithering
2448 idiot. Do this after the dev_config call as some controllers
2449 with buggy firmware may want to avoid reporting false device
2450 bugs */
2451
2452 if (print_info) {
2453 ata_dev_printk(dev, KERN_WARNING,
2454"Drive reports diagnostics failure. This may indicate a drive\n");
2455 ata_dev_printk(dev, KERN_WARNING,
2456"fault or invalid emulation. Contact drive vendor for information.\n");
2457 }
2458 }
2459
ffeae418 2460 return 0;
1da177e4
LT
2461
2462err_out_nosup:
0dd4b21f 2463 if (ata_msg_probe(ap))
88574551 2464 ata_dev_printk(dev, KERN_DEBUG,
7f5e4e8d 2465 "%s: EXIT, err\n", __func__);
ffeae418 2466 return rc;
1da177e4
LT
2467}
2468
be0d18df 2469/**
2e41e8e6 2470 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2471 * @ap: port
2472 *
2e41e8e6 2473 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2474 * detection.
2475 */
2476
2477int ata_cable_40wire(struct ata_port *ap)
2478{
2479 return ATA_CBL_PATA40;
2480}
2481
2482/**
2e41e8e6 2483 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2484 * @ap: port
2485 *
2e41e8e6 2486 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2487 * detection.
2488 */
2489
2490int ata_cable_80wire(struct ata_port *ap)
2491{
2492 return ATA_CBL_PATA80;
2493}
2494
2495/**
2496 * ata_cable_unknown - return unknown PATA cable.
2497 * @ap: port
2498 *
2499 * Helper method for drivers which have no PATA cable detection.
2500 */
2501
2502int ata_cable_unknown(struct ata_port *ap)
2503{
2504 return ATA_CBL_PATA_UNK;
2505}
2506
c88f90c3
TH
2507/**
2508 * ata_cable_ignore - return ignored PATA cable.
2509 * @ap: port
2510 *
2511 * Helper method for drivers which don't use cable type to limit
2512 * transfer mode.
2513 */
2514int ata_cable_ignore(struct ata_port *ap)
2515{
2516 return ATA_CBL_PATA_IGN;
2517}
2518
be0d18df
AC
2519/**
2520 * ata_cable_sata - return SATA cable type
2521 * @ap: port
2522 *
2523 * Helper method for drivers which have SATA cables
2524 */
2525
2526int ata_cable_sata(struct ata_port *ap)
2527{
2528 return ATA_CBL_SATA;
2529}
2530
1da177e4
LT
2531/**
2532 * ata_bus_probe - Reset and probe ATA bus
2533 * @ap: Bus to probe
2534 *
0cba632b
JG
2535 * Master ATA bus probing function. Initiates a hardware-dependent
2536 * bus reset, then attempts to identify any devices found on
2537 * the bus.
2538 *
1da177e4 2539 * LOCKING:
0cba632b 2540 * PCI/etc. bus probe sem.
1da177e4
LT
2541 *
2542 * RETURNS:
96072e69 2543 * Zero on success, negative errno otherwise.
1da177e4
LT
2544 */
2545
80289167 2546int ata_bus_probe(struct ata_port *ap)
1da177e4 2547{
28ca5c57 2548 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2549 int tries[ATA_MAX_DEVICES];
f58229f8 2550 int rc;
e82cbdb9 2551 struct ata_device *dev;
1da177e4 2552
28ca5c57 2553 ata_port_probe(ap);
c19ba8af 2554
f58229f8
TH
2555 ata_link_for_each_dev(dev, &ap->link)
2556 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2557
2558 retry:
cdeab114
TH
2559 ata_link_for_each_dev(dev, &ap->link) {
2560 /* If we issue an SRST then an ATA drive (not ATAPI)
2561 * may change configuration and be in PIO0 timing. If
2562 * we do a hard reset (or are coming from power on)
2563 * this is true for ATA or ATAPI. Until we've set a
2564 * suitable controller mode we should not touch the
2565 * bus as we may be talking too fast.
2566 */
2567 dev->pio_mode = XFER_PIO_0;
2568
2569 /* If the controller has a pio mode setup function
2570 * then use it to set the chipset to rights. Don't
2571 * touch the DMA setup as that will be dealt with when
2572 * configuring devices.
2573 */
2574 if (ap->ops->set_piomode)
2575 ap->ops->set_piomode(ap, dev);
2576 }
2577
2044470c 2578 /* reset and determine device classes */
52783c5d 2579 ap->ops->phy_reset(ap);
2061a47a 2580
f58229f8 2581 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2582 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2583 dev->class != ATA_DEV_UNKNOWN)
2584 classes[dev->devno] = dev->class;
2585 else
2586 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2587
52783c5d 2588 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2589 }
1da177e4 2590
52783c5d 2591 ata_port_probe(ap);
2044470c 2592
f31f0cc2
JG
2593 /* read IDENTIFY page and configure devices. We have to do the identify
2594 specific sequence bass-ackwards so that PDIAG- is released by
2595 the slave device */
2596
a4ba7fe2 2597 ata_link_for_each_dev_reverse(dev, &ap->link) {
f58229f8
TH
2598 if (tries[dev->devno])
2599 dev->class = classes[dev->devno];
ffeae418 2600
14d2bac1 2601 if (!ata_dev_enabled(dev))
ffeae418 2602 continue;
ffeae418 2603
bff04647
TH
2604 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2605 dev->id);
14d2bac1
TH
2606 if (rc)
2607 goto fail;
f31f0cc2
JG
2608 }
2609
be0d18df
AC
2610 /* Now ask for the cable type as PDIAG- should have been released */
2611 if (ap->ops->cable_detect)
2612 ap->cbl = ap->ops->cable_detect(ap);
2613
614fe29b
AC
2614 /* We may have SATA bridge glue hiding here irrespective of the
2615 reported cable types and sensed types */
2616 ata_link_for_each_dev(dev, &ap->link) {
2617 if (!ata_dev_enabled(dev))
2618 continue;
2619 /* SATA drives indicate we have a bridge. We don't know which
2620 end of the link the bridge is which is a problem */
2621 if (ata_id_is_sata(dev->id))
2622 ap->cbl = ATA_CBL_SATA;
2623 }
2624
f31f0cc2
JG
2625 /* After the identify sequence we can now set up the devices. We do
2626 this in the normal order so that the user doesn't get confused */
2627
f58229f8 2628 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2629 if (!ata_dev_enabled(dev))
2630 continue;
14d2bac1 2631
9af5c9c9 2632 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2633 rc = ata_dev_configure(dev);
9af5c9c9 2634 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2635 if (rc)
2636 goto fail;
1da177e4
LT
2637 }
2638
e82cbdb9 2639 /* configure transfer mode */
0260731f 2640 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2641 if (rc)
51713d35 2642 goto fail;
1da177e4 2643
f58229f8
TH
2644 ata_link_for_each_dev(dev, &ap->link)
2645 if (ata_dev_enabled(dev))
e82cbdb9 2646 return 0;
1da177e4 2647
e82cbdb9
TH
2648 /* no device present, disable port */
2649 ata_port_disable(ap);
96072e69 2650 return -ENODEV;
14d2bac1
TH
2651
2652 fail:
4ae72a1e
TH
2653 tries[dev->devno]--;
2654
14d2bac1
TH
2655 switch (rc) {
2656 case -EINVAL:
4ae72a1e 2657 /* eeek, something went very wrong, give up */
14d2bac1
TH
2658 tries[dev->devno] = 0;
2659 break;
4ae72a1e
TH
2660
2661 case -ENODEV:
2662 /* give it just one more chance */
2663 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2664 case -EIO:
4ae72a1e
TH
2665 if (tries[dev->devno] == 1) {
2666 /* This is the last chance, better to slow
2667 * down than lose it.
2668 */
936fd732 2669 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2670 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2671 }
14d2bac1
TH
2672 }
2673
4ae72a1e 2674 if (!tries[dev->devno])
3373efd8 2675 ata_dev_disable(dev);
ec573755 2676
14d2bac1 2677 goto retry;
1da177e4
LT
2678}
2679
2680/**
0cba632b
JG
2681 * ata_port_probe - Mark port as enabled
2682 * @ap: Port for which we indicate enablement
1da177e4 2683 *
0cba632b
JG
2684 * Modify @ap data structure such that the system
2685 * thinks that the entire port is enabled.
2686 *
cca3974e 2687 * LOCKING: host lock, or some other form of
0cba632b 2688 * serialization.
1da177e4
LT
2689 */
2690
2691void ata_port_probe(struct ata_port *ap)
2692{
198e0fed 2693 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2694}
2695
3be680b7
TH
2696/**
2697 * sata_print_link_status - Print SATA link status
936fd732 2698 * @link: SATA link to printk link status about
3be680b7
TH
2699 *
2700 * This function prints link speed and status of a SATA link.
2701 *
2702 * LOCKING:
2703 * None.
2704 */
6bdb4fc9 2705static void sata_print_link_status(struct ata_link *link)
3be680b7 2706{
6d5f9732 2707 u32 sstatus, scontrol, tmp;
3be680b7 2708
936fd732 2709 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2710 return;
936fd732 2711 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2712
936fd732 2713 if (ata_link_online(link)) {
3be680b7 2714 tmp = (sstatus >> 4) & 0xf;
936fd732 2715 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2716 "SATA link up %s (SStatus %X SControl %X)\n",
2717 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2718 } else {
936fd732 2719 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2720 "SATA link down (SStatus %X SControl %X)\n",
2721 sstatus, scontrol);
3be680b7
TH
2722 }
2723}
2724
ebdfca6e
AC
2725/**
2726 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2727 * @adev: device
2728 *
2729 * Obtain the other device on the same cable, or if none is
2730 * present NULL is returned
2731 */
2e9edbf8 2732
3373efd8 2733struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2734{
9af5c9c9
TH
2735 struct ata_link *link = adev->link;
2736 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2737 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2738 return NULL;
2739 return pair;
2740}
2741
1da177e4 2742/**
780a87f7
JG
2743 * ata_port_disable - Disable port.
2744 * @ap: Port to be disabled.
1da177e4 2745 *
780a87f7
JG
2746 * Modify @ap data structure such that the system
2747 * thinks that the entire port is disabled, and should
2748 * never attempt to probe or communicate with devices
2749 * on this port.
2750 *
cca3974e 2751 * LOCKING: host lock, or some other form of
780a87f7 2752 * serialization.
1da177e4
LT
2753 */
2754
2755void ata_port_disable(struct ata_port *ap)
2756{
9af5c9c9
TH
2757 ap->link.device[0].class = ATA_DEV_NONE;
2758 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2759 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2760}
2761
1c3fae4d 2762/**
3c567b7d 2763 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2764 * @link: Link to adjust SATA spd limit for
1c3fae4d 2765 *
936fd732 2766 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2767 * function only adjusts the limit. The change must be applied
3c567b7d 2768 * using sata_set_spd().
1c3fae4d
TH
2769 *
2770 * LOCKING:
2771 * Inherited from caller.
2772 *
2773 * RETURNS:
2774 * 0 on success, negative errno on failure
2775 */
936fd732 2776int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2777{
81952c54
TH
2778 u32 sstatus, spd, mask;
2779 int rc, highbit;
1c3fae4d 2780
936fd732 2781 if (!sata_scr_valid(link))
008a7896
TH
2782 return -EOPNOTSUPP;
2783
2784 /* If SCR can be read, use it to determine the current SPD.
936fd732 2785 * If not, use cached value in link->sata_spd.
008a7896 2786 */
936fd732 2787 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2788 if (rc == 0)
2789 spd = (sstatus >> 4) & 0xf;
2790 else
936fd732 2791 spd = link->sata_spd;
1c3fae4d 2792
936fd732 2793 mask = link->sata_spd_limit;
1c3fae4d
TH
2794 if (mask <= 1)
2795 return -EINVAL;
008a7896
TH
2796
2797 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2798 highbit = fls(mask) - 1;
2799 mask &= ~(1 << highbit);
2800
008a7896
TH
2801 /* Mask off all speeds higher than or equal to the current
2802 * one. Force 1.5Gbps if current SPD is not available.
2803 */
2804 if (spd > 1)
2805 mask &= (1 << (spd - 1)) - 1;
2806 else
2807 mask &= 1;
2808
2809 /* were we already at the bottom? */
1c3fae4d
TH
2810 if (!mask)
2811 return -EINVAL;
2812
936fd732 2813 link->sata_spd_limit = mask;
1c3fae4d 2814
936fd732 2815 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2816 sata_spd_string(fls(mask)));
1c3fae4d
TH
2817
2818 return 0;
2819}
2820
936fd732 2821static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2822{
5270222f
TH
2823 struct ata_link *host_link = &link->ap->link;
2824 u32 limit, target, spd;
1c3fae4d 2825
5270222f
TH
2826 limit = link->sata_spd_limit;
2827
2828 /* Don't configure downstream link faster than upstream link.
2829 * It doesn't speed up anything and some PMPs choke on such
2830 * configuration.
2831 */
2832 if (!ata_is_host_link(link) && host_link->sata_spd)
2833 limit &= (1 << host_link->sata_spd) - 1;
2834
2835 if (limit == UINT_MAX)
2836 target = 0;
1c3fae4d 2837 else
5270222f 2838 target = fls(limit);
1c3fae4d
TH
2839
2840 spd = (*scontrol >> 4) & 0xf;
5270222f 2841 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2842
5270222f 2843 return spd != target;
1c3fae4d
TH
2844}
2845
2846/**
3c567b7d 2847 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2848 * @link: Link in question
1c3fae4d
TH
2849 *
2850 * Test whether the spd limit in SControl matches
936fd732 2851 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2852 * whether hardreset is necessary to apply SATA spd
2853 * configuration.
2854 *
2855 * LOCKING:
2856 * Inherited from caller.
2857 *
2858 * RETURNS:
2859 * 1 if SATA spd configuration is needed, 0 otherwise.
2860 */
1dc55e87 2861static int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2862{
2863 u32 scontrol;
2864
936fd732 2865 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2866 return 1;
1c3fae4d 2867
936fd732 2868 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2869}
2870
2871/**
3c567b7d 2872 * sata_set_spd - set SATA spd according to spd limit
936fd732 2873 * @link: Link to set SATA spd for
1c3fae4d 2874 *
936fd732 2875 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2876 *
2877 * LOCKING:
2878 * Inherited from caller.
2879 *
2880 * RETURNS:
2881 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2882 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2883 */
936fd732 2884int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2885{
2886 u32 scontrol;
81952c54 2887 int rc;
1c3fae4d 2888
936fd732 2889 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2890 return rc;
1c3fae4d 2891
936fd732 2892 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2893 return 0;
2894
936fd732 2895 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2896 return rc;
2897
1c3fae4d
TH
2898 return 1;
2899}
2900
452503f9
AC
2901/*
2902 * This mode timing computation functionality is ported over from
2903 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2904 */
2905/*
b352e57d 2906 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2907 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2908 * for UDMA6, which is currently supported only by Maxtor drives.
2909 *
2910 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2911 */
2912
2913static const struct ata_timing ata_timing[] = {
70cd071e
TH
2914/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2915 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2916 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2917 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2918 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2919 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2920 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2921 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
452503f9 2922
70cd071e
TH
2923 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2924 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2925 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
452503f9 2926
70cd071e
TH
2927 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2928 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2929 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
b352e57d 2930 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
70cd071e 2931 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
452503f9
AC
2932
2933/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
70cd071e
TH
2934 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2935 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2936 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2937 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2938 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2939 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2940 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
452503f9
AC
2941
2942 { 0xFF }
2943};
2944
2dcb407e
JG
2945#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2946#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
452503f9
AC
2947
2948static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2949{
2950 q->setup = EZ(t->setup * 1000, T);
2951 q->act8b = EZ(t->act8b * 1000, T);
2952 q->rec8b = EZ(t->rec8b * 1000, T);
2953 q->cyc8b = EZ(t->cyc8b * 1000, T);
2954 q->active = EZ(t->active * 1000, T);
2955 q->recover = EZ(t->recover * 1000, T);
2956 q->cycle = EZ(t->cycle * 1000, T);
2957 q->udma = EZ(t->udma * 1000, UT);
2958}
2959
2960void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2961 struct ata_timing *m, unsigned int what)
2962{
2963 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2964 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2965 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2966 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2967 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2968 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2969 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2970 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2971}
2972
6357357c 2973const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 2974{
70cd071e
TH
2975 const struct ata_timing *t = ata_timing;
2976
2977 while (xfer_mode > t->mode)
2978 t++;
452503f9 2979
70cd071e
TH
2980 if (xfer_mode == t->mode)
2981 return t;
2982 return NULL;
452503f9
AC
2983}
2984
/**
 *	ata_timing_compute - compute final ATA timings for a transfer mode
 *	@adev: target device (EIDE overrides are read from its ID data)
 *	@speed: XFER_* transfer mode to compute timings for
 *	@t: output timing buffer
 *	@T: PIO/MWDMA bus clock period (units match ata_timing_quantize)
 *	@UT: UDMA bus clock period (units match ata_timing_quantize)
 *
 *	Look up the standard timings for @speed, merge in any extended
 *	cycle times the drive reports, quantize to bus clock counts and
 *	stretch active/recovery so the totals add up to the cycle time.
 *
 *	RETURNS:
 *	0 on success, -EINVAL if @speed has no timing table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		/* PIO modes use the drive-reported minimum PIO cycle;
		 * modes above PIO2 use the IORDY variant instead. */
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* keep the slower of standard vs drive-reported cycle times */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse once for the device's current PIO mode and
		 * merge, so PIO-issued commands remain safe in DMA modes */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3055
a0f79b92
TH
3056/**
3057 * ata_timing_cycle2mode - find xfer mode for the specified cycle duration
3058 * @xfer_shift: ATA_SHIFT_* value for transfer type to examine.
3059 * @cycle: cycle duration in ns
3060 *
3061 * Return matching xfer mode for @cycle. The returned mode is of
3062 * the transfer type specified by @xfer_shift. If @cycle is too
3063 * slow for @xfer_shift, 0xff is returned. If @cycle is faster
3064 * than the fastest known mode, the fasted mode is returned.
3065 *
3066 * LOCKING:
3067 * None.
3068 *
3069 * RETURNS:
3070 * Matching xfer_mode, 0xff if no match found.
3071 */
3072u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
3073{
3074 u8 base_mode = 0xff, last_mode = 0xff;
3075 const struct ata_xfer_ent *ent;
3076 const struct ata_timing *t;
3077
3078 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
3079 if (ent->shift == xfer_shift)
3080 base_mode = ent->base;
3081
3082 for (t = ata_timing_find_mode(base_mode);
3083 t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
3084 unsigned short this_cycle;
3085
3086 switch (xfer_shift) {
3087 case ATA_SHIFT_PIO:
3088 case ATA_SHIFT_MWDMA:
3089 this_cycle = t->cycle;
3090 break;
3091 case ATA_SHIFT_UDMA:
3092 this_cycle = t->udma;
3093 break;
3094 default:
3095 return 0xff;
3096 }
3097
3098 if (cycle > this_cycle)
3099 break;
3100
3101 last_mode = t->mode;
3102 }
3103
3104 return last_mode;
3105}
3106
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET suppresses the warning printk below */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, UDMA before MWDMA;
		 * refuse to leave the respective class empty */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* limit UDMA to speeds safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to disable PIO entirely or to "limit" to the same mask */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3195
/* Issue SET FEATURES - XFER MODE to @dev and revalidate it.  Device
 * errors from the command are tolerated for several classes of known
 * broken or pre-ATA hardware (see the ign_dev_err logic below);
 * anything beyond a device error is fatal.
 * Returns 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	/* keep ATA_DFLAG_PIO in sync with the selected transfer class */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* any error other than a device error is fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	if (dev->xfer_shift == ATA_SHIFT_PIO) {
		/* Old CFA may refuse this command, which is just fine */
		if (ata_id_is_cfa(dev->id))
			ign_dev_err = 1;
		/* Catch several broken garbage emulations plus some pre
		   ATA devices */
		if (ata_id_major_version(dev->id) == 0 &&
		    dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
		/* Some very old devices and some bad newer ones fail
		   any kind of SET_XFERMODE request but support PIO0-2
		   timings and no IORDY */
		if (!ata_id_has_iordy(dev->id) && dev->pio_mode <= XFER_PIO_2)
			ign_dev_err = 1;
	}
	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3267
1da177e4 3268/**
04351821 3269 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3270 * @link: link on which timings will be programmed
1967b7ff 3271 * @r_failed_dev: out parameter for failed device
1da177e4 3272 *
04351821
AC
3273 * Standard implementation of the function used to tune and set
3274 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3275 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3276 * returned in @r_failed_dev.
780a87f7 3277 *
1da177e4 3278 * LOCKING:
0cba632b 3279 * PCI/etc. bus probe sem.
e82cbdb9
TH
3280 *
3281 * RETURNS:
3282 * 0 on success, negative errno otherwise
1da177e4 3283 */
04351821 3284
0260731f 3285int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3286{
0260731f 3287 struct ata_port *ap = link->ap;
e8e0619f 3288 struct ata_device *dev;
f58229f8 3289 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3290
a6d5a51c 3291 /* step 1: calculate xfer_mask */
f58229f8 3292 ata_link_for_each_dev(dev, link) {
7dc951ae 3293 unsigned long pio_mask, dma_mask;
b3a70601 3294 unsigned int mode_mask;
a6d5a51c 3295
e1211e3f 3296 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3297 continue;
3298
b3a70601
AC
3299 mode_mask = ATA_DMA_MASK_ATA;
3300 if (dev->class == ATA_DEV_ATAPI)
3301 mode_mask = ATA_DMA_MASK_ATAPI;
3302 else if (ata_id_is_cfa(dev->id))
3303 mode_mask = ATA_DMA_MASK_CFA;
3304
3373efd8 3305 ata_dev_xfermask(dev);
33267325 3306 ata_force_xfermask(dev);
1da177e4 3307
acf356b1
TH
3308 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3309 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3310
3311 if (libata_dma_mask & mode_mask)
3312 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3313 else
3314 dma_mask = 0;
3315
acf356b1
TH
3316 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3317 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3318
4f65977d 3319 found = 1;
b15b3eba 3320 if (ata_dma_enabled(dev))
5444a6f4 3321 used_dma = 1;
a6d5a51c 3322 }
4f65977d 3323 if (!found)
e82cbdb9 3324 goto out;
a6d5a51c
TH
3325
3326 /* step 2: always set host PIO timings */
f58229f8 3327 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3328 if (!ata_dev_enabled(dev))
3329 continue;
3330
70cd071e 3331 if (dev->pio_mode == 0xff) {
f15a1daf 3332 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3333 rc = -EINVAL;
e82cbdb9 3334 goto out;
e8e0619f
TH
3335 }
3336
3337 dev->xfer_mode = dev->pio_mode;
3338 dev->xfer_shift = ATA_SHIFT_PIO;
3339 if (ap->ops->set_piomode)
3340 ap->ops->set_piomode(ap, dev);
3341 }
1da177e4 3342
a6d5a51c 3343 /* step 3: set host DMA timings */
f58229f8 3344 ata_link_for_each_dev(dev, link) {
b15b3eba 3345 if (!ata_dev_enabled(dev) || !ata_dma_enabled(dev))
e8e0619f
TH
3346 continue;
3347
3348 dev->xfer_mode = dev->dma_mode;
3349 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3350 if (ap->ops->set_dmamode)
3351 ap->ops->set_dmamode(ap, dev);
3352 }
1da177e4
LT
3353
3354 /* step 4: update devices' xfer mode */
f58229f8 3355 ata_link_for_each_dev(dev, link) {
18d90deb 3356 /* don't update suspended devices' xfer mode */
9666f400 3357 if (!ata_dev_enabled(dev))
83206a29
TH
3358 continue;
3359
3373efd8 3360 rc = ata_dev_set_mode(dev);
5bbc53f4 3361 if (rc)
e82cbdb9 3362 goto out;
83206a29 3363 }
1da177e4 3364
e8e0619f
TH
3365 /* Record simplex status. If we selected DMA then the other
3366 * host channels are not permitted to do so.
5444a6f4 3367 */
cca3974e 3368 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3369 ap->host->simplex_claimed = ap;
5444a6f4 3370
e82cbdb9
TH
3371 out:
3372 if (rc)
3373 *r_failed_dev = dev;
3374 return rc;
1da177e4
LT
3375}
3376
/**
 *	ata_wait_ready - wait for link to become ready
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready.  @check_ready should return
 *	positive number if @link is ready, 0 if it isn't, -ENODEV if
 *	link doesn't seem to be occupied, other errno for other error
 *	conditions.
 *
 *	Transient -ENODEV conditions are allowed for
 *	ATA_TMOUT_FF_WAIT.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_ready(struct ata_link *link, unsigned long deadline,
		   int (*check_ready)(struct ata_link *link))
{
	unsigned long start = jiffies;
	unsigned long nodev_deadline = ata_deadline(start, ATA_TMOUT_FF_WAIT);
	int warned = 0;

	/* never let the -ENODEV grace period extend past @deadline */
	if (time_after(nodev_deadline, deadline))
		nodev_deadline = deadline;

	while (1) {
		unsigned long now = jiffies;
		int ready, tmp;

		/* tmp keeps the raw value for the warning printk below */
		ready = tmp = check_ready(link);
		if (ready > 0)
			return 0;

		/* -ENODEV could be transient.  Ignore -ENODEV if link
		 * is online.  Also, some SATA devices take a long
		 * time to clear 0xff after reset.  For example,
		 * HHD424020F7SV00 iVDR needs >= 800ms while Quantum
		 * GoVault needs even more than that.  Wait for
		 * ATA_TMOUT_FF_WAIT on -ENODEV if link isn't offline.
		 *
		 * Note that some PATA controllers (pata_ali) explode
		 * if status register is read more than once when
		 * there's no device attached.
		 */
		if (ready == -ENODEV) {
			if (ata_link_online(link))
				ready = 0;
			else if ((link->ap->flags & ATA_FLAG_SATA) &&
				 !ata_link_offline(link) &&
				 time_before(now, nodev_deadline))
				ready = 0;
		}

		if (ready)
			return ready;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if we've been at it for 5s with >3s to go */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_link_printk(link, KERN_WARNING,
				"link is slow to respond, please be patient "
				"(ready=%d)\n", tmp);
			warned = 1;
		}

		msleep(50);
	}
}
3451
/**
 *	ata_wait_after_reset - wait for link to become ready after reset
 *	@link: link to be waited on
 *	@deadline: deadline jiffies for the operation
 *	@check_ready: callback to check link readiness
 *
 *	Wait for @link to become ready after reset.
 *
 *	LOCKING:
 *	EH context.
 *
 *	RETURNS:
 *	0 if @link is ready before @deadline; otherwise, -errno.
 */
int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
			 int (*check_ready)(struct ata_link *link))
{
	/* give the device a grace period after reset before polling */
	msleep(ATA_WAIT_AFTER_RESET);

	return ata_wait_ready(link, deadline, check_ready);
}
3473
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval = params[0];
	unsigned long duration = params[1];
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the earlier of @deadline and timeout */
	t = ata_deadline(jiffies, params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus is debounced */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 only counts as stable once past deadline */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies,
				       ata_deadline(last_jiffies, duration)))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3543
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol, serror;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* clear DET, set IPM=3 (presumably disabling partial/slumber
	 * transitions - confirm against the SControl register layout) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	if ((rc = sata_link_debounce(link, params, deadline)))
		return rc;

	/* clear SError, some PHYs require this even for SRST to work */
	if (!(rc = sata_scr_read(link, SCR_ERROR, &serror)))
		rc = sata_scr_write(link, SCR_ERROR, serror);

	/* -EINVAL from the SCR accessors is tolerated here */
	return rc != -EINVAL ? rc : 0;
}
3586
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* no point in trying softreset on offline link */
	if (ata_link_offline(link))
		ehc->i.action &= ~ATA_EH_SOFTRESET;

	return 0;
}
3630
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *	@online: optional out parameter indicating link onlineness
 *	@check_ready: optional callback to check link readiness
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *	After hardreset, link readiness is waited upon using
 *	ata_wait_ready() if @check_ready is specified.  LLDs are
 *	allowed to not specify @check_ready and wait itself after this
 *	function returns.  Device classification is LLD's
 *	responsibility.
 *
 *	*@online is set to one iff reset succeeded and @link is online
 *	after reset.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline,
			bool *online, int (*check_ready)(struct ata_link *))
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	/* pessimistic default; set true only after link proves online */
	if (online)
		*online = false;

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd. To be on the safe side, turn off phy during
		 * reconfiguration. This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
	if (rc)
		goto out;
	/* if link is offline nothing more to do */
	if (ata_link_offline(link))
		goto out;

	/* Link is online.  From this point, -ENODEV too is an error. */
	if (online)
		*online = true;

	if (sata_pmp_supported(link->ap) && ata_is_host_link(link)) {
		/* If PMP is supported, we have to do follow-up SRST.
		 * Some PMPs don't send D2H Reg FIS after hardreset if
		 * the first port is empty.  Wait only for
		 * ATA_TMOUT_PMP_SRST_WAIT.
		 */
		if (check_ready) {
			unsigned long pmp_deadline;

			pmp_deadline = ata_deadline(jiffies,
						    ATA_TMOUT_PMP_SRST_WAIT);
			if (time_after(pmp_deadline, deadline))
				pmp_deadline = deadline;
			ata_wait_ready(link, pmp_deadline, check_ready);
		}
		/* -EAGAIN asks EH to follow up with SRST */
		rc = -EAGAIN;
		goto out;
	}

	rc = 0;
	if (check_ready)
		rc = ata_wait_ready(link, deadline, check_ready);
 out:
	if (rc && rc != -EAGAIN) {
		/* online is set iff link is online && reset succeeded */
		if (online)
			*online = false;
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
	}
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3743
/**
 *	sata_std_hardreset - COMRESET w/o waiting or classification
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	Standard SATA COMRESET w/o waiting or classification.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 if link offline, -EAGAIN if link online, -errno on errors.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	bool online;
	int rc;

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline, &online, NULL);
	/* an online link is reported as -EAGAIN so EH follows up;
	 * @class is left untouched here */
	return online ? -EAGAIN : rc;
}
3769
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices (unused in this body)
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* reset complete, clear SError */
	if (!sata_scr_read(link, SCR_ERROR, &serror))
		sata_scr_write(link, SCR_ERROR, serror);

	/* print link status */
	sata_print_link_status(link);

	DPRINTK("EXIT\n");
}
3797
623a3128
TH
3798/**
3799 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3800 * @dev: device to compare against
3801 * @new_class: class of the new device
3802 * @new_id: IDENTIFY page of the new device
3803 *
3804 * Compare @new_class and @new_id against @dev and determine
3805 * whether @dev is the device indicated by @new_class and
3806 * @new_id.
3807 *
3808 * LOCKING:
3809 * None.
3810 *
3811 * RETURNS:
3812 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3813 */
3373efd8
TH
3814static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3815 const u16 *new_id)
623a3128
TH
3816{
3817 const u16 *old_id = dev->id;
a0cf733b
TH
3818 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3819 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3820
3821 if (dev->class != new_class) {
f15a1daf
TH
3822 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3823 dev->class, new_class);
623a3128
TH
3824 return 0;
3825 }
3826
a0cf733b
TH
3827 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3828 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3829 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3830 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3831
3832 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3833 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3834 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3835 return 0;
3836 }
3837
3838 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3839 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3840 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3841 return 0;
3842 }
3843
623a3128
TH
3844 return 1;
3845}
3846
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* use the port's scratch buffer so dev->id stays intact on failure */
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	/* same device confirmed - commit the fresh ID data */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
3879
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	/* remember capacity so a changed drive can be detected below */
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3944
/* One device-quirk table row.  A device is matched by model number and,
 * optionally, firmware revision; @horkage carries the ATA_HORKAGE_*
 * flags to apply.  Some entries below use '*' in the strings -
 * presumably glob-style matching; verify against the lookup code. */
struct ata_blacklist_entry {
	const char *model_num;	/* IDENTIFY model string to match */
	const char *model_rev;	/* firmware revision; NULL in many entries */
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply */
};
3950
3951static const struct ata_blacklist_entry ata_device_blacklist [] = {
3952 /* Devices with DMA related problems under Linux */
3953 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3954 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3955 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3956 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3957 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3958 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3959 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3960 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3961 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3962 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3963 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3964 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3965 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3966 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3967 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3968 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3969 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3970 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3971 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3972 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3973 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3974 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3975 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3976 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3977 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3978 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3979 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3980 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 3981 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 3982 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a 3983 /* Odd clown on sil3726/4726 PMPs */
50af2fa1 3984 { "Config Disk", NULL, ATA_HORKAGE_DISABLE },
6919a0a6 3985
18d6e9d5 3986 /* Weird ATAPI devices */
40a1d531 3987 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 3988
6919a0a6
AC
3989 /* Devices we expect to fail diagnostics */
3990
3991 /* Devices where NCQ should be avoided */
3992 /* NCQ is slow */
2dcb407e 3993 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 3994 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
3995 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3996 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3997 /* NCQ is broken */
539cc7c7 3998 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3999 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
da6f0ec2 4000 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 4001 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 4002
36e337d0
RH
4003 /* Blacklist entries taken from Silicon Image 3124/3132
4004 Windows driver .inf file - also several Linux problem reports */
4005 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4006 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4007 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 4008
16c55b03
TH
4009 /* devices which puke on READ_NATIVE_MAX */
4010 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4011 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4012 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4013 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4014
93328e11
AC
4015 /* Devices which report 1 sector over size HPA */
4016 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4017 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
b152fcd3 4018 { "ST310211A", NULL, ATA_HORKAGE_HPA_SIZE, },
93328e11 4019
6bbfd53d
AC
4020 /* Devices which get the IVB wrong */
4021 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
a79067e5
AC
4022 /* Maybe we should just blacklist TSSTcorp... */
4023 { "TSSTcorp CDDVDW SH-S202H", "SB00", ATA_HORKAGE_IVB, },
4024 { "TSSTcorp CDDVDW SH-S202H", "SB01", ATA_HORKAGE_IVB, },
6bbfd53d 4025 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
e9f33406
PM
4026 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4027 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4028 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
6bbfd53d 4029
6919a0a6
AC
4030 /* End Marker */
4031 { }
1da177e4 4032};
2e9edbf8 4033
/**
 *	strn_pattern_cmp - match a device string against a blacklist pattern
 *	@patt: pattern, optionally ending in @wildchar for a prefix match
 *	@name: string (model / firmware revision) to test
 *	@wildchar: the wildcard character, '*' for the blacklist table
 *
 *	A trailing wildcard makes the comparison a prefix match on the
 *	characters before the wildcard; otherwise the whole strings must
 *	compare equal.  The previous implementation compared only
 *	strlen(@name) bytes in the exact-match case, so a @name that was a
 *	strict prefix of @patt (e.g. "CRD-84" vs pattern "CRD-8400B")
 *	matched spuriously; strcmp() handles that and the empty-string
 *	cases correctly.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strcmp-style).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/* trailing wildcard: *\0 -> prefix match on the part before it */
	p = strchr(patt, wildchar);
	if (p && p[1] == '\0')
		return strncmp(patt, name, p - patt);

	/* no wildcard: require a full, exact match in both directions */
	return strcmp(patt, name);
}
4056
75683fe7 4057static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4058{
8bfa79fc
TH
4059 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4060 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4061 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4062
8bfa79fc
TH
4063 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4064 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4065
6919a0a6 4066 while (ad->model_num) {
539cc7c7 4067 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4068 if (ad->model_rev == NULL)
4069 return ad->horkage;
539cc7c7 4070 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4071 return ad->horkage;
f4b15fef 4072 }
6919a0a6 4073 ad++;
f4b15fef 4074 }
1da177e4
LT
4075 return 0;
4076}
4077
6919a0a6
AC
4078static int ata_dma_blacklisted(const struct ata_device *dev)
4079{
4080 /* We don't support polling DMA.
4081 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4082 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4083 */
9af5c9c9 4084 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4085 (dev->flags & ATA_DFLAG_CDB_INTR))
4086 return 1;
75683fe7 4087 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4088}
4089
6bbfd53d
AC
4090/**
4091 * ata_is_40wire - check drive side detection
4092 * @dev: device
4093 *
4094 * Perform drive side detection decoding, allowing for device vendors
4095 * who can't follow the documentation.
4096 */
4097
4098static int ata_is_40wire(struct ata_device *dev)
4099{
4100 if (dev->horkage & ATA_HORKAGE_IVB)
4101 return ata_drive_40wire_relaxed(dev->id);
4102 return ata_drive_40wire(dev->id);
4103}
4104
15a5551c
AC
4105/**
4106 * cable_is_40wire - 40/80/SATA decider
4107 * @ap: port to consider
4108 *
4109 * This function encapsulates the policy for speed management
4110 * in one place. At the moment we don't cache the result but
4111 * there is a good case for setting ap->cbl to the result when
4112 * we are called with unknown cables (and figuring out if it
4113 * impacts hotplug at all).
4114 *
4115 * Return 1 if the cable appears to be 40 wire.
4116 */
4117
4118static int cable_is_40wire(struct ata_port *ap)
4119{
4120 struct ata_link *link;
4121 struct ata_device *dev;
4122
4123 /* If the controller thinks we are 40 wire, we are */
4124 if (ap->cbl == ATA_CBL_PATA40)
4125 return 1;
4126 /* If the controller thinks we are 80 wire, we are */
4127 if (ap->cbl == ATA_CBL_PATA80 || ap->cbl == ATA_CBL_SATA)
4128 return 0;
f792068e
AC
4129 /* If the system is known to be 40 wire short cable (eg laptop),
4130 then we allow 80 wire modes even if the drive isn't sure */
4131 if (ap->cbl == ATA_CBL_PATA40_SHORT)
4132 return 0;
15a5551c
AC
4133 /* If the controller doesn't know we scan
4134
4135 - Note: We look for all 40 wire detects at this point.
4136 Any 80 wire detect is taken to be 80 wire cable
4137 because
4138 - In many setups only the one drive (slave if present)
4139 will give a valid detect
4140 - If you have a non detect capable drive you don't
4141 want it to colour the choice
4142 */
4143 ata_port_for_each_link(link, ap) {
4144 ata_link_for_each_dev(dev, link) {
4145 if (!ata_is_40wire(dev))
4146 return 0;
4147 }
4148 }
4149 return 1;
4150}
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	The limits are applied in a fixed order: controller caps, drive
 *	caps, CFA pairing, DMA blacklist, simplex claim, IORDY, the
 *	LLDD's ->mode_filter hook, and finally the cable rule.
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* let the LLDD veto modes it can't do */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if (cable_is_40wire(ap)) {
			ata_dev_printk(dev, KERN_WARNING,
				 "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4229
1da177e4
LT
4230/**
4231 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4232 * @dev: Device to which command will be sent
4233 *
780a87f7
JG
4234 * Issue SET FEATURES - XFER MODE command to device @dev
4235 * on port @ap.
4236 *
1da177e4 4237 * LOCKING:
0cba632b 4238 * PCI/etc. bus probe sem.
83206a29
TH
4239 *
4240 * RETURNS:
4241 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4242 */
4243
3373efd8 4244static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4245{
a0123703 4246 struct ata_taskfile tf;
83206a29 4247 unsigned int err_mask;
1da177e4
LT
4248
4249 /* set up set-features taskfile */
4250 DPRINTK("set features - xfer mode\n");
4251
464cf177
TH
4252 /* Some controllers and ATAPI devices show flaky interrupt
4253 * behavior after setting xfer mode. Use polling instead.
4254 */
3373efd8 4255 ata_tf_init(dev, &tf);
a0123703
TH
4256 tf.command = ATA_CMD_SET_FEATURES;
4257 tf.feature = SETFEATURES_XFER;
464cf177 4258 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4259 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4260 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4261 if (ata_pio_need_iordy(dev))
4262 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4263 /* If the device has IORDY and the controller does not - turn it off */
4264 else if (ata_id_has_iordy(dev->id))
11b7becc 4265 tf.nsect = 0x01;
b9f8ab2d
AC
4266 else /* In the ancient relic department - skip all of this */
4267 return 0;
1da177e4 4268
2b789108 4269 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4270
4271 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4272 return err_mask;
4273}
9f45cbd3 4274/**
218f3d30 4275 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4276 * @dev: Device to which command will be sent
4277 * @enable: Whether to enable or disable the feature
218f3d30 4278 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4279 *
4280 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4281 * on port @ap with sector count
9f45cbd3
KCA
4282 *
4283 * LOCKING:
4284 * PCI/etc. bus probe sem.
4285 *
4286 * RETURNS:
4287 * 0 on success, AC_ERR_* mask otherwise.
4288 */
218f3d30
JG
4289static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4290 u8 feature)
9f45cbd3
KCA
4291{
4292 struct ata_taskfile tf;
4293 unsigned int err_mask;
4294
4295 /* set up set-features taskfile */
4296 DPRINTK("set features - SATA features\n");
4297
4298 ata_tf_init(dev, &tf);
4299 tf.command = ATA_CMD_SET_FEATURES;
4300 tf.feature = enable;
4301 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4302 tf.protocol = ATA_PROT_NODATA;
218f3d30 4303 tf.nsect = feature;
9f45cbd3 4304
2b789108 4305 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4306
83206a29
TH
4307 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4308 return err_mask;
1da177e4
LT
4309}
4310
8bf62ece
AL
4311/**
4312 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4313 * @dev: Device to which command will be sent
e2a7f77a
RD
4314 * @heads: Number of heads (taskfile parameter)
4315 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4316 *
4317 * LOCKING:
6aff8f1f
TH
4318 * Kernel thread context (may sleep)
4319 *
4320 * RETURNS:
4321 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4322 */
3373efd8
TH
4323static unsigned int ata_dev_init_params(struct ata_device *dev,
4324 u16 heads, u16 sectors)
8bf62ece 4325{
a0123703 4326 struct ata_taskfile tf;
6aff8f1f 4327 unsigned int err_mask;
8bf62ece
AL
4328
4329 /* Number of sectors per track 1-255. Number of heads 1-16 */
4330 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4331 return AC_ERR_INVALID;
8bf62ece
AL
4332
4333 /* set up init dev params taskfile */
4334 DPRINTK("init dev params \n");
4335
3373efd8 4336 ata_tf_init(dev, &tf);
a0123703
TH
4337 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4338 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4339 tf.protocol = ATA_PROT_NODATA;
4340 tf.nsect = sectors;
4341 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4342
2b789108 4343 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4344 /* A clean abort indicates an original or just out of spec drive
4345 and we should continue as we issue the setup based on the
4346 drive reported working geometry */
4347 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4348 err_mask = 0;
8bf62ece 4349
6aff8f1f
TH
4350 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4351 return err_mask;
8bf62ece
AL
4352}
4353
1da177e4 4354/**
0cba632b
JG
4355 * ata_sg_clean - Unmap DMA memory associated with command
4356 * @qc: Command containing DMA memory to be released
4357 *
4358 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4359 *
4360 * LOCKING:
cca3974e 4361 * spin_lock_irqsave(host lock)
1da177e4 4362 */
70e6ad0c 4363void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4364{
4365 struct ata_port *ap = qc->ap;
ff2aeb1e 4366 struct scatterlist *sg = qc->sg;
1da177e4
LT
4367 int dir = qc->dma_dir;
4368
a4631474 4369 WARN_ON(sg == NULL);
1da177e4 4370
dde20207 4371 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4372
dde20207
JB
4373 if (qc->n_elem)
4374 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
1da177e4
LT
4375
4376 qc->flags &= ~ATA_QCFLAG_DMAMAP;
ff2aeb1e 4377 qc->sg = NULL;
1da177e4
LT
4378}
4379
1da177e4 4380/**
5895ef9a 4381 * atapi_check_dma - Check whether ATAPI DMA can be supported
1da177e4
LT
4382 * @qc: Metadata associated with taskfile to check
4383 *
780a87f7
JG
4384 * Allow low-level driver to filter ATA PACKET commands, returning
4385 * a status indicating whether or not it is OK to use DMA for the
4386 * supplied PACKET command.
4387 *
1da177e4 4388 * LOCKING:
624d5c51
TH
4389 * spin_lock_irqsave(host lock)
4390 *
4391 * RETURNS: 0 when ATAPI DMA can be used
4392 * nonzero otherwise
4393 */
5895ef9a 4394int atapi_check_dma(struct ata_queued_cmd *qc)
624d5c51
TH
4395{
4396 struct ata_port *ap = qc->ap;
71601958 4397
624d5c51
TH
4398 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4399 * few ATAPI devices choke on such DMA requests.
4400 */
4401 if (unlikely(qc->nbytes & 15))
4402 return 1;
e2cec771 4403
624d5c51
TH
4404 if (ap->ops->check_atapi_dma)
4405 return ap->ops->check_atapi_dma(qc);
e2cec771 4406
624d5c51
TH
4407 return 0;
4408}
1da177e4 4409
624d5c51
TH
4410/**
4411 * ata_std_qc_defer - Check whether a qc needs to be deferred
4412 * @qc: ATA command in question
4413 *
4414 * Non-NCQ commands cannot run with any other command, NCQ or
4415 * not. As upper layer only knows the queue depth, we are
4416 * responsible for maintaining exclusion. This function checks
4417 * whether a new command @qc can be issued.
4418 *
4419 * LOCKING:
4420 * spin_lock_irqsave(host lock)
4421 *
4422 * RETURNS:
4423 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4424 */
4425int ata_std_qc_defer(struct ata_queued_cmd *qc)
4426{
4427 struct ata_link *link = qc->dev->link;
e2cec771 4428
624d5c51
TH
4429 if (qc->tf.protocol == ATA_PROT_NCQ) {
4430 if (!ata_tag_valid(link->active_tag))
4431 return 0;
4432 } else {
4433 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4434 return 0;
4435 }
e2cec771 4436
624d5c51
TH
4437 return ATA_DEFER_LINK;
4438}
/* ->qc_prep implementation for controllers that need no prep work */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
1da177e4 4441
624d5c51
TH
4442/**
4443 * ata_sg_init - Associate command with scatter-gather table.
4444 * @qc: Command to be associated
4445 * @sg: Scatter-gather table.
4446 * @n_elem: Number of elements in s/g table.
4447 *
4448 * Initialize the data-related elements of queued_cmd @qc
4449 * to point to a scatter-gather table @sg, containing @n_elem
4450 * elements.
4451 *
4452 * LOCKING:
4453 * spin_lock_irqsave(host lock)
4454 */
4455void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4456 unsigned int n_elem)
4457{
4458 qc->sg = sg;
4459 qc->n_elem = n_elem;
4460 qc->cursg = qc->sg;
4461}
bb5cb290 4462
624d5c51
TH
4463/**
4464 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4465 * @qc: Command with scatter-gather table to be mapped.
4466 *
4467 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4468 *
4469 * LOCKING:
4470 * spin_lock_irqsave(host lock)
4471 *
4472 * RETURNS:
4473 * Zero on success, negative on error.
4474 *
4475 */
4476static int ata_sg_setup(struct ata_queued_cmd *qc)
4477{
4478 struct ata_port *ap = qc->ap;
4479 unsigned int n_elem;
1da177e4 4480
624d5c51 4481 VPRINTK("ENTER, ata%u\n", ap->print_id);
e2cec771 4482
624d5c51
TH
4483 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
4484 if (n_elem < 1)
4485 return -1;
bb5cb290 4486
624d5c51 4487 DPRINTK("%d sg elements mapped\n", n_elem);
bb5cb290 4488
624d5c51
TH
4489 qc->n_elem = n_elem;
4490 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4 4491
624d5c51 4492 return 0;
1da177e4
LT
4493}
4494
624d5c51
TH
4495/**
4496 * swap_buf_le16 - swap halves of 16-bit words in place
4497 * @buf: Buffer to swap
4498 * @buf_words: Number of 16-bit words in buffer.
4499 *
4500 * Swap halves of 16-bit words if needed to convert from
4501 * little-endian byte order to native cpu byte order, or
4502 * vice-versa.
4503 *
4504 * LOCKING:
4505 * Inherited from caller.
4506 */
4507void swap_buf_le16(u16 *buf, unsigned int buf_words)
8061f5f0 4508{
624d5c51
TH
4509#ifdef __BIG_ENDIAN
4510 unsigned int i;
8061f5f0 4511
624d5c51
TH
4512 for (i = 0; i < buf_words; i++)
4513 buf[i] = le16_to_cpu(buf[i]);
4514#endif /* __BIG_ENDIAN */
8061f5f0
TH
4515}
/**
 *	ata_qc_new - Request an available ATA command, for queueing
 *	@ap: Port associated with device @dev
 *
 *	Scan the port's tag bitmap and atomically claim the first free
 *	tag.  The claim uses test_and_set_bit() so concurrent callers
 *	cannot grab the same tag.
 *
 *	LOCKING:
 *	None.
 */

static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
{
	struct ata_queued_cmd *qc = NULL;
	unsigned int i;

	/* no command while frozen */
	if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
		return NULL;

	/* the last tag is reserved for internal command. */
	for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
		if (!test_and_set_bit(i, &ap->qc_allocated)) {
			/* tag i is now ours; fetch its qc slot */
			qc = __ata_qc_from_tag(ap, i);
			break;
		}

	if (qc)
		qc->tag = i;

	return qc;
}
4547
4548/**
4549 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
4550 * @dev: Device from whom we request an available command structure
4551 *
4552 * LOCKING:
0cba632b 4553 * None.
1da177e4
LT
4554 */
4555
3373efd8 4556struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 4557{
9af5c9c9 4558 struct ata_port *ap = dev->link->ap;
1da177e4
LT
4559 struct ata_queued_cmd *qc;
4560
4561 qc = ata_qc_new(ap);
4562 if (qc) {
1da177e4
LT
4563 qc->scsicmd = NULL;
4564 qc->ap = ap;
4565 qc->dev = dev;
1da177e4 4566
2c13b7ce 4567 ata_qc_reinit(qc);
1da177e4
LT
4568 }
4569
4570 return qc;
4571}
4572
1da177e4
LT
4573/**
4574 * ata_qc_free - free unused ata_queued_cmd
4575 * @qc: Command to complete
4576 *
4577 * Designed to free unused ata_queued_cmd object
4578 * in case something prevents using it.
4579 *
4580 * LOCKING:
cca3974e 4581 * spin_lock_irqsave(host lock)
1da177e4
LT
4582 */
4583void ata_qc_free(struct ata_queued_cmd *qc)
4584{
4ba946e9
TH
4585 struct ata_port *ap = qc->ap;
4586 unsigned int tag;
4587
a4631474 4588 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 4589
4ba946e9
TH
4590 qc->flags = 0;
4591 tag = qc->tag;
4592 if (likely(ata_tag_valid(tag))) {
4ba946e9 4593 qc->tag = ATA_TAG_POISON;
6cec4a39 4594 clear_bit(tag, &ap->qc_allocated);
4ba946e9 4595 }
1da177e4
LT
4596}
/**
 *	__ata_qc_complete - common command-completion tail
 *	@qc: Command to complete
 *
 *	Unmaps DMA if mapped, clears the link/port active bookkeeping
 *	(sactive / active_tag / nr_active_links / qc_active / excl_link)
 *	and invokes the completion callback.  Used by both the normal
 *	and the EH completion paths of ata_qc_complete().
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* last NCQ command on this link gone -> link is idle */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
4634
39599a53
TH
4635static void fill_result_tf(struct ata_queued_cmd *qc)
4636{
4637 struct ata_port *ap = qc->ap;
4638
39599a53 4639 qc->result_tf.flags = qc->tf.flags;
22183bf5 4640 ap->ops->qc_fill_rtf(qc);
39599a53
TH
4641}
4642
00115e0f
TH
4643static void ata_verify_xfer(struct ata_queued_cmd *qc)
4644{
4645 struct ata_device *dev = qc->dev;
4646
4647 if (ata_tag_internal(qc->tag))
4648 return;
4649
4650 if (ata_is_nodata(qc->tf.protocol))
4651 return;
4652
4653 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
4654 return;
4655
4656 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
4657}
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are handled by their waiter;
			 * everything else goes to EH */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
4745
dedaf2b0
TH
4746/**
4747 * ata_qc_complete_multiple - Complete multiple qcs successfully
4748 * @ap: port in question
4749 * @qc_active: new qc_active mask
dedaf2b0
TH
4750 *
4751 * Complete in-flight commands. This functions is meant to be
4752 * called from low-level driver's interrupt routine to complete
4753 * requests normally. ap->qc_active and @qc_active is compared
4754 * and commands are completed accordingly.
4755 *
4756 * LOCKING:
cca3974e 4757 * spin_lock_irqsave(host lock)
dedaf2b0
TH
4758 *
4759 * RETURNS:
4760 * Number of completed commands on success, -errno otherwise.
4761 */
79f97dad 4762int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active)
dedaf2b0
TH
4763{
4764 int nr_done = 0;
4765 u32 done_mask;
4766 int i;
4767
4768 done_mask = ap->qc_active ^ qc_active;
4769
4770 if (unlikely(done_mask & qc_active)) {
4771 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
4772 "(%08x->%08x)\n", ap->qc_active, qc_active);
4773 return -EINVAL;
4774 }
4775
4776 for (i = 0; i < ATA_MAX_QUEUE; i++) {
4777 struct ata_queued_cmd *qc;
4778
4779 if (!(done_mask & (1 << i)))
4780 continue;
4781
4782 if ((qc = ata_qc_from_tag(ap, i))) {
dedaf2b0
TH
4783 ata_qc_complete(qc);
4784 nr_done++;
4785 }
4786 }
4787
4788 return nr_done;
4789}
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* mark the command active BEFORE any failure path so that
	 * ata_qc_complete() bookkeeping stays balanced */
	if (ata_is_ncq(prot)) {
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sg_err;

	/* if device is sleeping, schedule reset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_RESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	/* failure: complete immediately with the error mask set */
	ata_qc_complete(qc);
}
4861
34bf2170
TH
4862/**
4863 * sata_scr_valid - test whether SCRs are accessible
936fd732 4864 * @link: ATA link to test SCR accessibility for
34bf2170 4865 *
936fd732 4866 * Test whether SCRs are accessible for @link.
34bf2170
TH
4867 *
4868 * LOCKING:
4869 * None.
4870 *
4871 * RETURNS:
4872 * 1 if SCRs are accessible, 0 otherwise.
4873 */
936fd732 4874int sata_scr_valid(struct ata_link *link)
34bf2170 4875{
936fd732
TH
4876 struct ata_port *ap = link->ap;
4877
a16abc0b 4878 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
4879}
4880
4881/**
4882 * sata_scr_read - read SCR register of the specified port
936fd732 4883 * @link: ATA link to read SCR for
34bf2170
TH
4884 * @reg: SCR to read
4885 * @val: Place to store read value
4886 *
936fd732 4887 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
4888 * guaranteed to succeed if @link is ap->link, the cable type of
4889 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
4890 *
4891 * LOCKING:
633273a3 4892 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
4893 *
4894 * RETURNS:
4895 * 0 on success, negative errno on failure.
4896 */
936fd732 4897int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 4898{
633273a3 4899 if (ata_is_host_link(link)) {
633273a3 4900 if (sata_scr_valid(link))
82ef04fb 4901 return link->ap->ops->scr_read(link, reg, val);
633273a3
TH
4902 return -EOPNOTSUPP;
4903 }
4904
4905 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
4906}
4907
4908/**
4909 * sata_scr_write - write SCR register of the specified port
936fd732 4910 * @link: ATA link to write SCR for
34bf2170
TH
4911 * @reg: SCR to write
4912 * @val: value to write
4913 *
936fd732 4914 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
4915 * guaranteed to succeed if @link is ap->link, the cable type of
4916 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
4917 *
4918 * LOCKING:
633273a3 4919 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
4920 *
4921 * RETURNS:
4922 * 0 on success, negative errno on failure.
4923 */
936fd732 4924int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 4925{
633273a3 4926 if (ata_is_host_link(link)) {
633273a3 4927 if (sata_scr_valid(link))
82ef04fb 4928 return link->ap->ops->scr_write(link, reg, val);
633273a3
TH
4929 return -EOPNOTSUPP;
4930 }
936fd732 4931
633273a3 4932 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
4933}
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		int rc;

		if (sata_scr_valid(link)) {
			rc = link->ap->ops->scr_write(link, reg, val);
			if (rc == 0)
				/* read back into the local copy of @val to
				 * flush the posted write; the value read is
				 * intentionally discarded */
				rc = link->ap->ops->scr_read(link, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	/* PMP links: plain PMP write, no flush readback is performed */
	return sata_pmp_scr_write(link, reg, val);
}
4966
4967/**
936fd732
TH
4968 * ata_link_online - test whether the given link is online
4969 * @link: ATA link to test
34bf2170 4970 *
936fd732
TH
4971 * Test whether @link is online. Note that this function returns
4972 * 0 if online status of @link cannot be obtained, so
4973 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4974 *
4975 * LOCKING:
4976 * None.
4977 *
4978 * RETURNS:
b5b3fa38 4979 * True if the port online status is available and online.
34bf2170 4980 */
b5b3fa38 4981bool ata_link_online(struct ata_link *link)
34bf2170
TH
4982{
4983 u32 sstatus;
4984
936fd732
TH
4985 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
4986 (sstatus & 0xf) == 0x3)
b5b3fa38
TH
4987 return true;
4988 return false;
34bf2170
TH
4989}
4990
4991/**
936fd732
TH
4992 * ata_link_offline - test whether the given link is offline
4993 * @link: ATA link to test
34bf2170 4994 *
936fd732
TH
4995 * Test whether @link is offline. Note that this function
4996 * returns 0 if offline status of @link cannot be obtained, so
4997 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
4998 *
4999 * LOCKING:
5000 * None.
5001 *
5002 * RETURNS:
b5b3fa38 5003 * True if the port offline status is available and offline.
34bf2170 5004 */
b5b3fa38 5005bool ata_link_offline(struct ata_link *link)
34bf2170
TH
5006{
5007 u32 sstatus;
5008
936fd732
TH
5009 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5010 (sstatus & 0xf) != 0x3)
b5b3fa38
TH
5011 return true;
5012 return false;
34bf2170 5013}
0baab86b 5014
6ffa01d8 5015#ifdef CONFIG_PM
/**
 *	ata_host_request_pm - request EH to perform a PM operation on all ports
 *	@host: host to operate on
 *	@mesg: PM message to hand to each port's EH
 *	@action: EH action mask to set on every link
 *	@ehi_flags: EH info flags to set on every link
 *	@wait: if non-zero, wait for each port's EH to finish and
 *	       propagate the first failure
 *
 *	Helper shared by ata_host_suspend() and ata_host_resume().
 *	For each port, queues the PM request to the error handler and
 *	optionally waits for completion.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, first non-zero EH result when @wait is set.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/*
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
	ata_lpm_enable(host);

	/* suspend ports one by one, waiting for each (wait == 1) */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallelly.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait == 0: fire off all port resumes without blocking */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_RESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;

	/* reenable link pm */
	ata_lpm_disable(host);
}
6ffa01d8 5117#endif
500530f6 5118
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -ENOMEM if the PRD table cannot be allocated.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;

	/* devres-managed DMA allocation - freed automatically on detach */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	return 0;
}
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe everything past the persistent region of the struct */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* all-ones masks == no transfer-mode restriction yet */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u8 spd;
	int rc;

	/* remember SControl so it can be restored on detach */
	rc = sata_scr_read(link, SCR_CONTROL, &link->saved_scontrol);
	if (rc)
		return rc;

	/* SPD field (bits 7:4) limits the allowed link speed */
	spd = (link->saved_scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	/* apply any libata.force= kernel parameter overrides */
	ata_force_link_limits(link);

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* suppress EH until the port is registered */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

#ifdef CONFIG_ATA_SFF
	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
#endif
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	/* deferrable: fastdrain may fire late without waking the CPU */
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* host link is link 0 (pmp == 0) */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
/**
 *	ata_host_release - devres release callback for an ATA host
 *	@gendev: generic device the host is attached to
 *	@res: devres resource (the host itself, unused here)
 *
 *	Free every allocated port (including its SCSI host reference
 *	and PMP link array) and clear the driver data pointer.
 *	Runs automatically when the owning device is released.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* slots may be sparse if allocation failed part way */
		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 keeps a NULL sentinel after the last port pointer */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	/* releasing the group frees host and all ports via devres */
	devres_release_group(dev, NULL);
	return NULL;
}
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* once @ppi runs out (NULL entry), keep reusing the last pi */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops become the host's ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
	}

	return host;
}
/**
 *	ata_host_stop - devres release callback stopping all ports of a host
 *	@gendev: generic device the host is attached to
 *	@res: devres resource (unused marker allocated by ata_host_start())
 *
 *	Invoke each port's ->port_stop and then the host's ->host_stop.
 *	Registered by ata_host_start() only when at least one stop
 *	callback exists.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
/**
 *	ata_finalize_port_ops - finalize ata_port_operations
 *	@ops: ata_port_operations to finalize
 *
 *	An ata_port_operations can inherit from another ops and that
 *	ops can again inherit from another.  This can go on as many
 *	times as necessary as long as there is no loop in the
 *	inheritance chain.
 *
 *	Ops tables are finalized when the host is started.  NULL or
 *	unspecified entries are inherited from the closest ancestor
 *	which has the method and the entry is populated with it.
 *	After finalization, the ops table directly points to all the
 *	methods and ->inherits is no longer necessary and cleared.
 *
 *	Using ATA_OP_NULL, inheriting ops can force a method to NULL.
 *
 *	LOCKING:
 *	None.
 */
static void ata_finalize_port_ops(struct ata_port_operations *ops)
{
	static DEFINE_SPINLOCK(lock);
	const struct ata_port_operations *cur;
	/* treat the ops table as an array of function pointers, from
	 * the first member up to (not including) ->inherits */
	void **begin = (void **)ops;
	void **end = (void **)&ops->inherits;
	void **pp;

	/* ->inherits == NULL doubles as the "already finalized" marker */
	if (!ops || !ops->inherits)
		return;

	spin_lock(&lock);

	for (cur = ops->inherits; cur; cur = cur->inherits) {
		void **inherit = (void **)cur;

		for (pp = begin; pp < end; pp++, inherit++)
			if (!*pp)
				*pp = *inherit;
	}

	/* ATA_OP_NULL slots are IS_ERR sentinels - collapse them to NULL */
	for (pp = begin; pp < end; pp++)
		if (IS_ERR(*pp))
			*pp = NULL;

	ops->inherits = NULL;

	spin_unlock(&lock);
}
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* idempotent - second call is a no-op */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	ata_finalize_port_ops(host->ops);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_finalize_port_ops(ap->ops);

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* register a devres action only if anything needs stopping;
	 * allocated up-front so the start loop can't fail on OOM after
	 * some ports have already started */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_printk(KERN_ERR, host->dev,
						   "failed to start port %d "
						   "(errno=%d)\n", i, rc);
				goto err_out;
			}
		}
		/* keep the port frozen until EH-driven probing begins */
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* roll back the ports started so far, in reverse order */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
/**
 *	ata_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.  The array is NULL-terminated past
	 * n_ports (see ata_host_alloc()).
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* probe */
		if (ap->ops->error_handler) {
			/* new-style driver: probing is done by EH */
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask |= ATA_ALL_DEVICES;
			ehi->action |= ATA_EH_RESET | ATA_EH_LPM;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style driver without EH: probe directly */
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
	}

	return 0;
}
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessary
 *	arguments and performs the three steps in one go.
 *
 *	An invalid IRQ skips the IRQ registration and expects the host to
 *	have set polling mode on the port.  In this case, @irq_handler
 *	should be NULL.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* Special case for polling mode */
	if (!irq) {
		WARN_ON(irq_handler);
		return ata_host_register(host, sht);
	}

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* old-style drivers without EH: nothing to quiesce */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING - EH context belongs
	 * to us.  Restore SControl and disable all existing devices.
	 */
	__ata_port_for_each_link(link, ap) {
		/* saved_scontrol was captured in sata_link_init_spd() */
		sata_scr_write(link, SCR_CONTROL, link->saved_scontrol);
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
5843
0529c159
TH
5844/**
5845 * ata_host_detach - Detach all ports of an ATA host
5846 * @host: Host to detach
5847 *
5848 * Detach all ports of @host.
5849 *
5850 * LOCKING:
5851 * Kernel thread context (may sleep).
5852 */
5853void ata_host_detach(struct ata_host *host)
5854{
5855 int i;
5856
5857 for (i = 0; i < host->n_ports; i++)
5858 ata_port_detach(host->ports[i]);
562f0c2d
TH
5859
5860 /* the host is dead now, dissociate ACPI */
5861 ata_acpi_dissociate(host);
0529c159
TH
5862}
5863
374b1873
JG
5864#ifdef CONFIG_PCI
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = &pdev->dev;
	struct ata_host *host = dev_get_drvdata(dev);

	/* resources are freed by devres when the device goes away */
	ata_host_detach(host);
}
5884
5885/* move to PCI subsystem */
057ace5e 5886int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
5887{
5888 unsigned long tmp = 0;
5889
5890 switch (bits->width) {
5891 case 1: {
5892 u8 tmp8 = 0;
5893 pci_read_config_byte(pdev, bits->reg, &tmp8);
5894 tmp = tmp8;
5895 break;
5896 }
5897 case 2: {
5898 u16 tmp16 = 0;
5899 pci_read_config_word(pdev, bits->reg, &tmp16);
5900 tmp = tmp16;
5901 break;
5902 }
5903 case 4: {
5904 u32 tmp32 = 0;
5905 pci_read_config_dword(pdev, bits->reg, &tmp32);
5906 tmp = tmp32;
5907 break;
5908 }
5909
5910 default:
5911 return -EINVAL;
5912 }
5913
5914 tmp &= bits->mask;
5915
5916 return (tmp == bits->val) ? 1 : 0;
5917}
9b847548 5918
6ffa01d8 5919#ifdef CONFIG_PM
/**
 *	ata_pci_device_do_suspend - PCI-level suspend helper
 *	@pdev: PCI device to suspend
 *	@mesg: PM message (determines target power state)
 *
 *	Save config space, disable the device and, for sleep-class
 *	events, drop it into D3hot.  Does not touch ATA state; pair
 *	with ata_host_suspend() (see ata_pci_device_suspend()).
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* only real sleep transitions power the device down */
	if (mesg.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
}
/**
 *	ata_pci_device_do_resume - PCI-level resume helper
 *	@pdev: PCI device to resume
 *
 *	Restore power state and config space, re-enable the device
 *	(devres-managed) and restore bus mastering.
 *
 *	RETURNS:
 *	0 on success, -errno if the device could not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
/**
 *	ata_pci_device_suspend - suspend ATA host and its PCI device
 *	@pdev: PCI device the host is attached to
 *	@mesg: PM message
 *
 *	Suspend the ATA host first (via EH); only if that succeeds is
 *	the PCI device itself powered down.
 *
 *	RETURNS:
 *	0 on success, -errno if the host suspend failed.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
/**
 *	ata_pci_device_resume - resume PCI device and its ATA host
 *	@pdev: PCI device the host is attached to
 *
 *	Mirror of ata_pci_device_suspend(): bring the PCI device back
 *	up first, then kick the ATA host's EH-driven resume.
 *
 *	RETURNS:
 *	0 on success, -errno if the PCI device could not be resumed.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
6ffa01d8
TH
5971#endif /* CONFIG_PM */
5972
1da177e4
LT
5973#endif /* CONFIG_PCI */
5974
33267325
TH
5975static int __init ata_parse_force_one(char **cur,
5976 struct ata_force_ent *force_ent,
5977 const char **reason)
5978{
5979 /* FIXME: Currently, there's no way to tag init const data and
5980 * using __initdata causes build failure on some versions of
5981 * gcc. Once __initdataconst is implemented, add const to the
5982 * following structure.
5983 */
5984 static struct ata_force_param force_tbl[] __initdata = {
5985 { "40c", .cbl = ATA_CBL_PATA40 },
5986 { "80c", .cbl = ATA_CBL_PATA80 },
5987 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
5988 { "unk", .cbl = ATA_CBL_PATA_UNK },
5989 { "ign", .cbl = ATA_CBL_PATA_IGN },
5990 { "sata", .cbl = ATA_CBL_SATA },
5991 { "1.5Gbps", .spd_limit = 1 },
5992 { "3.0Gbps", .spd_limit = 2 },
5993 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
5994 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
5995 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
5996 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
5997 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
5998 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
5999 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
6000 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
6001 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
6002 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
6003 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
6004 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
6005 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
6006 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
6007 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6008 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6009 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
6010 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6011 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6012 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
6013 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6014 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6015 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
6016 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6017 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6018 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
6019 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6020 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6021 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
6022 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6023 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6024 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
6025 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6026 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6027 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
6028 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
05944bdf
TH
6029 { "nohrst", .lflags = ATA_LFLAG_NO_HRST },
6030 { "nosrst", .lflags = ATA_LFLAG_NO_SRST },
6031 { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST },
33267325
TH
6032 };
6033 char *start = *cur, *p = *cur;
6034 char *id, *val, *endp;
6035 const struct ata_force_param *match_fp = NULL;
6036 int nr_matches = 0, i;
6037
6038 /* find where this param ends and update *cur */
6039 while (*p != '\0' && *p != ',')
6040 p++;
6041
6042 if (*p == '\0')
6043 *cur = p;
6044 else
6045 *cur = p + 1;
6046
6047 *p = '\0';
6048
6049 /* parse */
6050 p = strchr(start, ':');
6051 if (!p) {
6052 val = strstrip(start);
6053 goto parse_val;
6054 }
6055 *p = '\0';
6056
6057 id = strstrip(start);
6058 val = strstrip(p + 1);
6059
6060 /* parse id */
6061 p = strchr(id, '.');
6062 if (p) {
6063 *p++ = '\0';
6064 force_ent->device = simple_strtoul(p, &endp, 10);
6065 if (p == endp || *endp != '\0') {
6066 *reason = "invalid device";
6067 return -EINVAL;
6068 }
6069 }
6070
6071 force_ent->port = simple_strtoul(id, &endp, 10);
6072 if (p == endp || *endp != '\0') {
6073 *reason = "invalid port/link";
6074 return -EINVAL;
6075 }
6076
6077 parse_val:
6078 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
6079 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
6080 const struct ata_force_param *fp = &force_tbl[i];
6081
6082 if (strncasecmp(val, fp->name, strlen(val)))
6083 continue;
6084
6085 nr_matches++;
6086 match_fp = fp;
6087
6088 if (strcasecmp(val, fp->name) == 0) {
6089 nr_matches = 1;
6090 break;
6091 }
6092 }
6093
6094 if (!nr_matches) {
6095 *reason = "unknown value";
6096 return -EINVAL;
6097 }
6098 if (nr_matches > 1) {
6099 *reason = "ambigious value";
6100 return -EINVAL;
6101 }
6102
6103 force_ent->param = *match_fp;
6104
6105 return 0;
6106}
6107
6108static void __init ata_parse_force_param(void)
6109{
6110 int idx = 0, size = 1;
6111 int last_port = -1, last_device = -1;
6112 char *p, *cur, *next;
6113
6114 /* calculate maximum number of params and allocate force_tbl */
6115 for (p = ata_force_param_buf; *p; p++)
6116 if (*p == ',')
6117 size++;
6118
6119 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
6120 if (!ata_force_tbl) {
6121 printk(KERN_WARNING "ata: failed to extend force table, "
6122 "libata.force ignored\n");
6123 return;
6124 }
6125
6126 /* parse and populate the table */
6127 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
6128 const char *reason = "";
6129 struct ata_force_ent te = { .port = -1, .device = -1 };
6130
6131 next = cur;
6132 if (ata_parse_force_one(&next, &te, &reason)) {
6133 printk(KERN_WARNING "ata: failed to parse force "
6134 "parameter \"%s\" (%s)\n",
6135 cur, reason);
6136 continue;
6137 }
6138
6139 if (te.port == -1) {
6140 te.port = last_port;
6141 te.device = last_device;
6142 }
6143
6144 ata_force_tbl[idx++] = te;
6145
6146 last_port = te.port;
6147 last_device = te.device;
6148 }
6149
6150 ata_force_tbl_size = idx;
6151}
1da177e4 6152
1da177e4
LT
6153static int __init ata_init(void)
6154{
33267325
TH
6155 ata_parse_force_param();
6156
1da177e4
LT
6157 ata_wq = create_workqueue("ata");
6158 if (!ata_wq)
49ea3b04 6159 goto free_force_tbl;
1da177e4 6160
453b07ac 6161 ata_aux_wq = create_singlethread_workqueue("ata_aux");
49ea3b04
EO
6162 if (!ata_aux_wq)
6163 goto free_wq;
453b07ac 6164
1da177e4
LT
6165 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
6166 return 0;
49ea3b04
EO
6167
6168free_wq:
6169 destroy_workqueue(ata_wq);
6170free_force_tbl:
6171 kfree(ata_force_tbl);
6172 return -ENOMEM;
1da177e4
LT
6173}
6174
/**
 *	ata_exit - libata module exit
 *
 *	Releases everything ata_init() set up: the force table and both
 *	workqueues.
 */
static void __exit ata_exit(void)
{
	kfree(ata_force_tbl);
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
6181
a4625085 6182subsys_initcall(ata_init);
1da177e4
LT
6183module_exit(ata_exit);
6184
67846b30 6185static unsigned long ratelimit_time;
34af946a 6186static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
6187
6188int ata_ratelimit(void)
6189{
6190 int rc;
6191 unsigned long flags;
6192
6193 spin_lock_irqsave(&ata_ratelimit_lock, flags);
6194
6195 if (time_after(jiffies, ratelimit_time)) {
6196 rc = 1;
6197 ratelimit_time = jiffies + (HZ/5);
6198 } else
6199 rc = 0;
6200
6201 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
6202
6203 return rc;
6204}
6205
c22daff4
TH
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval: polling interval in milliseconds
 *	@timeout: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval until @timeout expires.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval, unsigned long timeout)
{
	unsigned long deadline;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	deadline = ata_deadline(jiffies, timeout);

	while ((tmp & mask) == val && time_before(jiffies, deadline)) {
		msleep(interval);
		tmp = ioread32(reg);
	}

	return tmp;
}
6250
dd5b06c4
TH
/*
 * Dummy port_ops
 */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	/* dummy ports cannot execute commands; fail every qc outright */
	return AC_ERR_SYSTEM;
}
6258
/* EH callback for dummy ports: nothing to recover, so do nothing */
static void ata_dummy_error_handler(struct ata_port *ap)
{
	/* truly dummy */
}
6263
/* minimal port_ops for ports that must exist but stay inert */
struct ata_port_operations ata_dummy_port_ops = {
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.error_handler = ata_dummy_error_handler,
};
6269
21b0ad4f
TH
/* port_info wrapper for drivers that need a dummy port slot */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
6273
1da177e4
LT
6274/*
6275 * libata is essentially a library of internal helper functions for
6276 * low-level ATA host controller drivers. As such, the API/ABI is
6277 * likely to change as new drivers are added and updated.
6278 * Do not depend on ABI/API stability.
6279 */
e9c83914
TH
6280EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
6281EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
6282EXPORT_SYMBOL_GPL(sata_deb_timing_long);
029cfd6b
TH
6283EXPORT_SYMBOL_GPL(ata_base_port_ops);
6284EXPORT_SYMBOL_GPL(sata_port_ops);
dd5b06c4 6285EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 6286EXPORT_SYMBOL_GPL(ata_dummy_port_info);
aadffb68 6287EXPORT_SYMBOL_GPL(__ata_port_next_link);
1da177e4 6288EXPORT_SYMBOL_GPL(ata_std_bios_param);
cca3974e 6289EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 6290EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 6291EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 6292EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 6293EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 6294EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 6295EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 6296EXPORT_SYMBOL_GPL(ata_sg_init);
f686bcb8 6297EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 6298EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
436d34b3 6299EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
6300EXPORT_SYMBOL_GPL(ata_tf_to_fis);
6301EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
6302EXPORT_SYMBOL_GPL(ata_pack_xfermask);
6303EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
6304EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
6305EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
6306EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
6307EXPORT_SYMBOL_GPL(ata_mode_string);
6308EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4 6309EXPORT_SYMBOL_GPL(ata_port_start);
04351821 6310EXPORT_SYMBOL_GPL(ata_do_set_mode);
31cc23b3 6311EXPORT_SYMBOL_GPL(ata_std_qc_defer);
e46834cd 6312EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4 6313EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 6314EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 6315EXPORT_SYMBOL_GPL(sata_set_spd);
aa2731ad 6316EXPORT_SYMBOL_GPL(ata_wait_after_reset);
936fd732
TH
6317EXPORT_SYMBOL_GPL(sata_link_debounce);
6318EXPORT_SYMBOL_GPL(sata_link_resume);
0aa1113d 6319EXPORT_SYMBOL_GPL(ata_std_prereset);
cc0680a5 6320EXPORT_SYMBOL_GPL(sata_link_hardreset);
57c9efdf 6321EXPORT_SYMBOL_GPL(sata_std_hardreset);
203c75b8 6322EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
6323EXPORT_SYMBOL_GPL(ata_dev_classify);
6324EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 6325EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 6326EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 6327EXPORT_SYMBOL_GPL(ata_wait_register);
1da177e4
LT
6328EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
6329EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 6330EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 6331EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 6332EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
34bf2170
TH
6333EXPORT_SYMBOL_GPL(sata_scr_valid);
6334EXPORT_SYMBOL_GPL(sata_scr_read);
6335EXPORT_SYMBOL_GPL(sata_scr_write);
6336EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
6337EXPORT_SYMBOL_GPL(ata_link_online);
6338EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 6339#ifdef CONFIG_PM
cca3974e
JG
6340EXPORT_SYMBOL_GPL(ata_host_suspend);
6341EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 6342#endif /* CONFIG_PM */
6a62a04d
TH
6343EXPORT_SYMBOL_GPL(ata_id_string);
6344EXPORT_SYMBOL_GPL(ata_id_c_string);
963e4975 6345EXPORT_SYMBOL_GPL(ata_do_dev_read_id);
1da177e4
LT
6346EXPORT_SYMBOL_GPL(ata_scsi_simulate);
6347
1bc4ccff 6348EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 6349EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
6350EXPORT_SYMBOL_GPL(ata_timing_compute);
6351EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 6352EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 6353
1da177e4
LT
6354#ifdef CONFIG_PCI
6355EXPORT_SYMBOL_GPL(pci_test_config_bits);
1da177e4 6356EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 6357#ifdef CONFIG_PM
500530f6
TH
6358EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
6359EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
6360EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
6361EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 6362#endif /* CONFIG_PM */
1da177e4 6363#endif /* CONFIG_PCI */
9b847548 6364
b64bbc39
TH
6365EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
6366EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
6367EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
6368EXPORT_SYMBOL_GPL(ata_port_desc);
6369#ifdef CONFIG_PCI
6370EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
6371#endif /* CONFIG_PCI */
7b70fc03 6372EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 6373EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 6374EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 6375EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 6376EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
6377EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
6378EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
6379EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
6380EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
10acf3b0 6381EXPORT_SYMBOL_GPL(ata_eh_analyze_ncq_error);
022bdb07 6382EXPORT_SYMBOL_GPL(ata_do_eh);
a1efdaba 6383EXPORT_SYMBOL_GPL(ata_std_error_handler);
be0d18df
AC
6384
6385EXPORT_SYMBOL_GPL(ata_cable_40wire);
6386EXPORT_SYMBOL_GPL(ata_cable_80wire);
6387EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 6388EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 6389EXPORT_SYMBOL_GPL(ata_cable_sata);