]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
libata: uninline atapi_cmd_type()
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/highmem.h>
50#include <linux/spinlock.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h>
53#include <linux/timer.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
67846b30 58#include <linux/jiffies.h>
378f058c 59#include <linux/scatterlist.h>
2dcb407e 60#include <linux/io.h>
1da177e4 61#include <scsi/scsi.h>
193515d5 62#include <scsi/scsi_cmnd.h>
1da177e4
LT
63#include <scsi/scsi_host.h>
64#include <linux/libata.h>
1da177e4
LT
65#include <asm/semaphore.h>
66#include <asm/byteorder.h>
140b5e59 67#include <linux/cdrom.h>
1da177e4
LT
68
69#include "libata.h"
70
fda0efc5 71
d7bb4cc7 72/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
73const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
74const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
75const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 76
3373efd8
TH
77static unsigned int ata_dev_init_params(struct ata_device *dev,
78 u16 heads, u16 sectors);
79static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
80static unsigned int ata_dev_set_feature(struct ata_device *dev,
81 u8 enable, u8 feature);
3373efd8 82static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 83static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 84
f3187195 85unsigned int ata_print_id = 1;
1da177e4
LT
86static struct workqueue_struct *ata_wq;
87
453b07ac
TH
88struct workqueue_struct *ata_aux_wq;
89
33267325
TH
90struct ata_force_param {
91 const char *name;
92 unsigned int cbl;
93 int spd_limit;
94 unsigned long xfer_mask;
95 unsigned int horkage_on;
96 unsigned int horkage_off;
97};
98
99struct ata_force_ent {
100 int port;
101 int device;
102 struct ata_force_param param;
103};
104
105static struct ata_force_ent *ata_force_tbl;
106static int ata_force_tbl_size;
107
108static char ata_force_param_buf[PAGE_SIZE] __initdata;
7afb4222
TH
109/* param_buf is thrown away after initialization, disallow read */
110module_param_string(force, ata_force_param_buf, sizeof(ata_force_param_buf), 0);
33267325
TH
111MODULE_PARM_DESC(force, "Force ATA configurations including cable type, link speed and transfer mode (see Documentation/kernel-parameters.txt for details)");
112
418dc1f5 113int atapi_enabled = 1;
1623c81e
JG
114module_param(atapi_enabled, int, 0444);
115MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
116
c5c61bda 117static int atapi_dmadir = 0;
95de719a
AL
118module_param(atapi_dmadir, int, 0444);
119MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
120
baf4fdfa
ML
121int atapi_passthru16 = 1;
122module_param(atapi_passthru16, int, 0444);
123MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
124
c3c013a2
JG
125int libata_fua = 0;
126module_param_named(fua, libata_fua, int, 0444);
127MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
128
2dcb407e 129static int ata_ignore_hpa;
1e999736
AC
130module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
131MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
132
b3a70601
AC
133static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
134module_param_named(dma, libata_dma_mask, int, 0444);
135MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
136
a8601e5f
AM
137static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
138module_param(ata_probe_timeout, int, 0444);
139MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
140
6ebe9d86 141int libata_noacpi = 0;
d7d0dad6 142module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 143MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 144
ae8d4ee7
AC
145int libata_allow_tpm = 0;
146module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
147MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");
148
1da177e4
LT
149MODULE_AUTHOR("Jeff Garzik");
150MODULE_DESCRIPTION("Library module for ATA devices");
151MODULE_LICENSE("GPL");
152MODULE_VERSION(DRV_VERSION);
153
0baab86b 154
33267325
TH
155/**
156 * ata_force_cbl - force cable type according to libata.force
4cdfa1b3 157 * @ap: ATA port of interest
33267325
TH
158 *
159 * Force cable type according to libata.force and whine about it.
160 * The last entry which has matching port number is used, so it
161 * can be specified as part of device force parameters. For
162 * example, both "a:40c,1.00:udma4" and "1.00:40c,udma4" have the
163 * same effect.
164 *
165 * LOCKING:
166 * EH context.
167 */
168void ata_force_cbl(struct ata_port *ap)
169{
170 int i;
171
172 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
173 const struct ata_force_ent *fe = &ata_force_tbl[i];
174
175 if (fe->port != -1 && fe->port != ap->print_id)
176 continue;
177
178 if (fe->param.cbl == ATA_CBL_NONE)
179 continue;
180
181 ap->cbl = fe->param.cbl;
182 ata_port_printk(ap, KERN_NOTICE,
183 "FORCE: cable set to %s\n", fe->param.name);
184 return;
185 }
186}
187
188/**
189 * ata_force_spd_limit - force SATA spd limit according to libata.force
190 * @link: ATA link of interest
191 *
192 * Force SATA spd limit according to libata.force and whine about
193 * it. When only the port part is specified (e.g. 1:), the limit
194 * applies to all links connected to both the host link and all
195 * fan-out ports connected via PMP. If the device part is
196 * specified as 0 (e.g. 1.00:), it specifies the first fan-out
197 * link not the host link. Device number 15 always points to the
198 * host link whether PMP is attached or not.
199 *
200 * LOCKING:
201 * EH context.
202 */
203static void ata_force_spd_limit(struct ata_link *link)
204{
205 int linkno, i;
206
207 if (ata_is_host_link(link))
208 linkno = 15;
209 else
210 linkno = link->pmp;
211
212 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
213 const struct ata_force_ent *fe = &ata_force_tbl[i];
214
215 if (fe->port != -1 && fe->port != link->ap->print_id)
216 continue;
217
218 if (fe->device != -1 && fe->device != linkno)
219 continue;
220
221 if (!fe->param.spd_limit)
222 continue;
223
224 link->hw_sata_spd_limit = (1 << fe->param.spd_limit) - 1;
225 ata_link_printk(link, KERN_NOTICE,
226 "FORCE: PHY spd limit set to %s\n", fe->param.name);
227 return;
228 }
229}
230
231/**
232 * ata_force_xfermask - force xfermask according to libata.force
233 * @dev: ATA device of interest
234 *
235 * Force xfer_mask according to libata.force and whine about it.
236 * For consistency with link selection, device number 15 selects
237 * the first device connected to the host link.
238 *
239 * LOCKING:
240 * EH context.
241 */
242static void ata_force_xfermask(struct ata_device *dev)
243{
244 int devno = dev->link->pmp + dev->devno;
245 int alt_devno = devno;
246 int i;
247
248 /* allow n.15 for the first device attached to host port */
249 if (ata_is_host_link(dev->link) && devno == 0)
250 alt_devno = 15;
251
252 for (i = ata_force_tbl_size - 1; i >= 0; i--) {
253 const struct ata_force_ent *fe = &ata_force_tbl[i];
254 unsigned long pio_mask, mwdma_mask, udma_mask;
255
256 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
257 continue;
258
259 if (fe->device != -1 && fe->device != devno &&
260 fe->device != alt_devno)
261 continue;
262
263 if (!fe->param.xfer_mask)
264 continue;
265
266 ata_unpack_xfermask(fe->param.xfer_mask,
267 &pio_mask, &mwdma_mask, &udma_mask);
268 if (udma_mask)
269 dev->udma_mask = udma_mask;
270 else if (mwdma_mask) {
271 dev->udma_mask = 0;
272 dev->mwdma_mask = mwdma_mask;
273 } else {
274 dev->udma_mask = 0;
275 dev->mwdma_mask = 0;
276 dev->pio_mask = pio_mask;
277 }
278
279 ata_dev_printk(dev, KERN_NOTICE,
280 "FORCE: xfer_mask set to %s\n", fe->param.name);
281 return;
282 }
283}
284
285/**
286 * ata_force_horkage - force horkage according to libata.force
287 * @dev: ATA device of interest
288 *
289 * Force horkage according to libata.force and whine about it.
290 * For consistency with link selection, device number 15 selects
291 * the first device connected to the host link.
292 *
293 * LOCKING:
294 * EH context.
295 */
296static void ata_force_horkage(struct ata_device *dev)
297{
298 int devno = dev->link->pmp + dev->devno;
299 int alt_devno = devno;
300 int i;
301
302 /* allow n.15 for the first device attached to host port */
303 if (ata_is_host_link(dev->link) && devno == 0)
304 alt_devno = 15;
305
306 for (i = 0; i < ata_force_tbl_size; i++) {
307 const struct ata_force_ent *fe = &ata_force_tbl[i];
308
309 if (fe->port != -1 && fe->port != dev->link->ap->print_id)
310 continue;
311
312 if (fe->device != -1 && fe->device != devno &&
313 fe->device != alt_devno)
314 continue;
315
316 if (!(~dev->horkage & fe->param.horkage_on) &&
317 !(dev->horkage & fe->param.horkage_off))
318 continue;
319
320 dev->horkage |= fe->param.horkage_on;
321 dev->horkage &= ~fe->param.horkage_off;
322
323 ata_dev_printk(dev, KERN_NOTICE,
324 "FORCE: horkage modified (%s)\n", fe->param.name);
325 }
326}
327
436d34b3
TH
328/**
329 * atapi_cmd_type - Determine ATAPI command type from SCSI opcode
330 * @opcode: SCSI opcode
331 *
332 * Determine ATAPI command type from @opcode.
333 *
334 * LOCKING:
335 * None.
336 *
337 * RETURNS:
338 * ATAPI_{READ|WRITE|READ_CD|PASS_THRU|MISC}
339 */
340int atapi_cmd_type(u8 opcode)
341{
342 switch (opcode) {
343 case GPCMD_READ_10:
344 case GPCMD_READ_12:
345 return ATAPI_READ;
346
347 case GPCMD_WRITE_10:
348 case GPCMD_WRITE_12:
349 case GPCMD_WRITE_AND_VERIFY_10:
350 return ATAPI_WRITE;
351
352 case GPCMD_READ_CD:
353 case GPCMD_READ_CD_MSF:
354 return ATAPI_READ_CD;
355
356 default:
357 return ATAPI_MISC;
358 }
359}
360
1da177e4
LT
361/**
362 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
363 * @tf: Taskfile to convert
1da177e4 364 * @pmp: Port multiplier port
9977126c
TH
365 * @is_cmd: This FIS is for command
366 * @fis: Buffer into which data will output
1da177e4
LT
367 *
368 * Converts a standard ATA taskfile to a Serial ATA
369 * FIS structure (Register - Host to Device).
370 *
371 * LOCKING:
372 * Inherited from caller.
373 */
9977126c 374void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 375{
9977126c
TH
376 fis[0] = 0x27; /* Register - Host to Device FIS */
377 fis[1] = pmp & 0xf; /* Port multiplier number*/
378 if (is_cmd)
379 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
380
1da177e4
LT
381 fis[2] = tf->command;
382 fis[3] = tf->feature;
383
384 fis[4] = tf->lbal;
385 fis[5] = tf->lbam;
386 fis[6] = tf->lbah;
387 fis[7] = tf->device;
388
389 fis[8] = tf->hob_lbal;
390 fis[9] = tf->hob_lbam;
391 fis[10] = tf->hob_lbah;
392 fis[11] = tf->hob_feature;
393
394 fis[12] = tf->nsect;
395 fis[13] = tf->hob_nsect;
396 fis[14] = 0;
397 fis[15] = tf->ctl;
398
399 fis[16] = 0;
400 fis[17] = 0;
401 fis[18] = 0;
402 fis[19] = 0;
403}
404
405/**
406 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
407 * @fis: Buffer from which data will be input
408 * @tf: Taskfile to output
409 *
e12a1be6 410 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
411 *
412 * LOCKING:
413 * Inherited from caller.
414 */
415
057ace5e 416void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
417{
418 tf->command = fis[2]; /* status */
419 tf->feature = fis[3]; /* error */
420
421 tf->lbal = fis[4];
422 tf->lbam = fis[5];
423 tf->lbah = fis[6];
424 tf->device = fis[7];
425
426 tf->hob_lbal = fis[8];
427 tf->hob_lbam = fis[9];
428 tf->hob_lbah = fis[10];
429
430 tf->nsect = fis[12];
431 tf->hob_nsect = fis[13];
432}
433
8cbd6df1
AL
434static const u8 ata_rw_cmds[] = {
435 /* pio multi */
436 ATA_CMD_READ_MULTI,
437 ATA_CMD_WRITE_MULTI,
438 ATA_CMD_READ_MULTI_EXT,
439 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
440 0,
441 0,
442 0,
443 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
444 /* pio */
445 ATA_CMD_PIO_READ,
446 ATA_CMD_PIO_WRITE,
447 ATA_CMD_PIO_READ_EXT,
448 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
449 0,
450 0,
451 0,
452 0,
8cbd6df1
AL
453 /* dma */
454 ATA_CMD_READ,
455 ATA_CMD_WRITE,
456 ATA_CMD_READ_EXT,
9a3dccc4
TH
457 ATA_CMD_WRITE_EXT,
458 0,
459 0,
460 0,
461 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 462};
1da177e4
LT
463
464/**
8cbd6df1 465 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
466 * @tf: command to examine and configure
467 * @dev: device tf belongs to
1da177e4 468 *
2e9edbf8 469 * Examine the device configuration and tf->flags to calculate
8cbd6df1 470 * the proper read/write commands and protocol to use.
1da177e4
LT
471 *
472 * LOCKING:
473 * caller.
474 */
bd056d7e 475static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 476{
9a3dccc4 477 u8 cmd;
1da177e4 478
9a3dccc4 479 int index, fua, lba48, write;
2e9edbf8 480
9a3dccc4 481 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
482 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
483 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 484
8cbd6df1
AL
485 if (dev->flags & ATA_DFLAG_PIO) {
486 tf->protocol = ATA_PROT_PIO;
9a3dccc4 487 index = dev->multi_count ? 0 : 8;
9af5c9c9 488 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
489 /* Unable to use DMA due to host limitation */
490 tf->protocol = ATA_PROT_PIO;
0565c26d 491 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
492 } else {
493 tf->protocol = ATA_PROT_DMA;
9a3dccc4 494 index = 16;
8cbd6df1 495 }
1da177e4 496
9a3dccc4
TH
497 cmd = ata_rw_cmds[index + fua + lba48 + write];
498 if (cmd) {
499 tf->command = cmd;
500 return 0;
501 }
502 return -1;
1da177e4
LT
503}
504
35b649fe
TH
505/**
506 * ata_tf_read_block - Read block address from ATA taskfile
507 * @tf: ATA taskfile of interest
508 * @dev: ATA device @tf belongs to
509 *
510 * LOCKING:
511 * None.
512 *
513 * Read block address from @tf. This function can handle all
514 * three address formats - LBA, LBA48 and CHS. tf->protocol and
515 * flags select the address format to use.
516 *
517 * RETURNS:
518 * Block address read from @tf.
519 */
520u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
521{
522 u64 block = 0;
523
524 if (tf->flags & ATA_TFLAG_LBA) {
525 if (tf->flags & ATA_TFLAG_LBA48) {
526 block |= (u64)tf->hob_lbah << 40;
527 block |= (u64)tf->hob_lbam << 32;
528 block |= tf->hob_lbal << 24;
529 } else
530 block |= (tf->device & 0xf) << 24;
531
532 block |= tf->lbah << 16;
533 block |= tf->lbam << 8;
534 block |= tf->lbal;
535 } else {
536 u32 cyl, head, sect;
537
538 cyl = tf->lbam | (tf->lbah << 8);
539 head = tf->device & 0xf;
540 sect = tf->lbal;
541
542 block = (cyl * dev->heads + head) * dev->sectors + sect;
543 }
544
545 return block;
546}
547
bd056d7e
TH
548/**
549 * ata_build_rw_tf - Build ATA taskfile for given read/write request
550 * @tf: Target ATA taskfile
551 * @dev: ATA device @tf belongs to
552 * @block: Block address
553 * @n_block: Number of blocks
554 * @tf_flags: RW/FUA etc...
555 * @tag: tag
556 *
557 * LOCKING:
558 * None.
559 *
560 * Build ATA taskfile @tf for read/write request described by
561 * @block, @n_block, @tf_flags and @tag on @dev.
562 *
563 * RETURNS:
564 *
565 * 0 on success, -ERANGE if the request is too large for @dev,
566 * -EINVAL if the request is invalid.
567 */
568int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
569 u64 block, u32 n_block, unsigned int tf_flags,
570 unsigned int tag)
571{
572 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
573 tf->flags |= tf_flags;
574
6d1245bf 575 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
576 /* yay, NCQ */
577 if (!lba_48_ok(block, n_block))
578 return -ERANGE;
579
580 tf->protocol = ATA_PROT_NCQ;
581 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
582
583 if (tf->flags & ATA_TFLAG_WRITE)
584 tf->command = ATA_CMD_FPDMA_WRITE;
585 else
586 tf->command = ATA_CMD_FPDMA_READ;
587
588 tf->nsect = tag << 3;
589 tf->hob_feature = (n_block >> 8) & 0xff;
590 tf->feature = n_block & 0xff;
591
592 tf->hob_lbah = (block >> 40) & 0xff;
593 tf->hob_lbam = (block >> 32) & 0xff;
594 tf->hob_lbal = (block >> 24) & 0xff;
595 tf->lbah = (block >> 16) & 0xff;
596 tf->lbam = (block >> 8) & 0xff;
597 tf->lbal = block & 0xff;
598
599 tf->device = 1 << 6;
600 if (tf->flags & ATA_TFLAG_FUA)
601 tf->device |= 1 << 7;
602 } else if (dev->flags & ATA_DFLAG_LBA) {
603 tf->flags |= ATA_TFLAG_LBA;
604
605 if (lba_28_ok(block, n_block)) {
606 /* use LBA28 */
607 tf->device |= (block >> 24) & 0xf;
608 } else if (lba_48_ok(block, n_block)) {
609 if (!(dev->flags & ATA_DFLAG_LBA48))
610 return -ERANGE;
611
612 /* use LBA48 */
613 tf->flags |= ATA_TFLAG_LBA48;
614
615 tf->hob_nsect = (n_block >> 8) & 0xff;
616
617 tf->hob_lbah = (block >> 40) & 0xff;
618 tf->hob_lbam = (block >> 32) & 0xff;
619 tf->hob_lbal = (block >> 24) & 0xff;
620 } else
621 /* request too large even for LBA48 */
622 return -ERANGE;
623
624 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
625 return -EINVAL;
626
627 tf->nsect = n_block & 0xff;
628
629 tf->lbah = (block >> 16) & 0xff;
630 tf->lbam = (block >> 8) & 0xff;
631 tf->lbal = block & 0xff;
632
633 tf->device |= ATA_LBA;
634 } else {
635 /* CHS */
636 u32 sect, head, cyl, track;
637
638 /* The request -may- be too large for CHS addressing. */
639 if (!lba_28_ok(block, n_block))
640 return -ERANGE;
641
642 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
643 return -EINVAL;
644
645 /* Convert LBA to CHS */
646 track = (u32)block / dev->sectors;
647 cyl = track / dev->heads;
648 head = track % dev->heads;
649 sect = (u32)block % dev->sectors + 1;
650
651 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
652 (u32)block, track, cyl, head, sect);
653
654 /* Check whether the converted CHS can fit.
655 Cylinder: 0-65535
656 Head: 0-15
657 Sector: 1-255*/
658 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
659 return -ERANGE;
660
661 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
662 tf->lbal = sect;
663 tf->lbam = cyl;
664 tf->lbah = cyl >> 8;
665 tf->device |= head;
666 }
667
668 return 0;
669}
670
cb95d562
TH
671/**
672 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
673 * @pio_mask: pio_mask
674 * @mwdma_mask: mwdma_mask
675 * @udma_mask: udma_mask
676 *
677 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
678 * unsigned int xfer_mask.
679 *
680 * LOCKING:
681 * None.
682 *
683 * RETURNS:
684 * Packed xfer_mask.
685 */
7dc951ae
TH
686unsigned long ata_pack_xfermask(unsigned long pio_mask,
687 unsigned long mwdma_mask,
688 unsigned long udma_mask)
cb95d562
TH
689{
690 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
691 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
692 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
693}
694
c0489e4e
TH
695/**
696 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
697 * @xfer_mask: xfer_mask to unpack
698 * @pio_mask: resulting pio_mask
699 * @mwdma_mask: resulting mwdma_mask
700 * @udma_mask: resulting udma_mask
701 *
702 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
703 * Any NULL distination masks will be ignored.
704 */
7dc951ae
TH
705void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
706 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
707{
708 if (pio_mask)
709 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
710 if (mwdma_mask)
711 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
712 if (udma_mask)
713 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
714}
715
cb95d562 716static const struct ata_xfer_ent {
be9a50c8 717 int shift, bits;
cb95d562
TH
718 u8 base;
719} ata_xfer_tbl[] = {
70cd071e
TH
720 { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
721 { ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
722 { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
cb95d562
TH
723 { -1, },
724};
725
726/**
727 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
728 * @xfer_mask: xfer_mask of interest
729 *
730 * Return matching XFER_* value for @xfer_mask. Only the highest
731 * bit of @xfer_mask is considered.
732 *
733 * LOCKING:
734 * None.
735 *
736 * RETURNS:
70cd071e 737 * Matching XFER_* value, 0xff if no match found.
cb95d562 738 */
7dc951ae 739u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
740{
741 int highbit = fls(xfer_mask) - 1;
742 const struct ata_xfer_ent *ent;
743
744 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
745 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
746 return ent->base + highbit - ent->shift;
70cd071e 747 return 0xff;
cb95d562
TH
748}
749
750/**
751 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
752 * @xfer_mode: XFER_* of interest
753 *
754 * Return matching xfer_mask for @xfer_mode.
755 *
756 * LOCKING:
757 * None.
758 *
759 * RETURNS:
760 * Matching xfer_mask, 0 if no match found.
761 */
7dc951ae 762unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
763{
764 const struct ata_xfer_ent *ent;
765
766 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
767 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
768 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
769 & ~((1 << ent->shift) - 1);
cb95d562
TH
770 return 0;
771}
772
773/**
774 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
775 * @xfer_mode: XFER_* of interest
776 *
777 * Return matching xfer_shift for @xfer_mode.
778 *
779 * LOCKING:
780 * None.
781 *
782 * RETURNS:
783 * Matching xfer_shift, -1 if no match found.
784 */
7dc951ae 785int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
786{
787 const struct ata_xfer_ent *ent;
788
789 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
790 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
791 return ent->shift;
792 return -1;
793}
794
1da177e4 795/**
1da7b0d0
TH
796 * ata_mode_string - convert xfer_mask to string
797 * @xfer_mask: mask of bits supported; only highest bit counts.
1da177e4
LT
798 *
799 * Determine string which represents the highest speed
1da7b0d0 800 * (highest bit in @modemask).
1da177e4
LT
801 *
802 * LOCKING:
803 * None.
804 *
805 * RETURNS:
806 * Constant C string representing highest speed listed in
1da7b0d0 807 * @mode_mask, or the constant C string "<n/a>".
1da177e4 808 */
7dc951ae 809const char *ata_mode_string(unsigned long xfer_mask)
1da177e4 810{
75f554bc
TH
811 static const char * const xfer_mode_str[] = {
812 "PIO0",
813 "PIO1",
814 "PIO2",
815 "PIO3",
816 "PIO4",
b352e57d
AC
817 "PIO5",
818 "PIO6",
75f554bc
TH
819 "MWDMA0",
820 "MWDMA1",
821 "MWDMA2",
b352e57d
AC
822 "MWDMA3",
823 "MWDMA4",
75f554bc
TH
824 "UDMA/16",
825 "UDMA/25",
826 "UDMA/33",
827 "UDMA/44",
828 "UDMA/66",
829 "UDMA/100",
830 "UDMA/133",
831 "UDMA7",
832 };
1da7b0d0 833 int highbit;
1da177e4 834
1da7b0d0
TH
835 highbit = fls(xfer_mask) - 1;
836 if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
837 return xfer_mode_str[highbit];
1da177e4 838 return "<n/a>";
1da177e4
LT
839}
840
4c360c81
TH
/* Translate a SATA spd value (1-based) to a human readable string. */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	/* spd is 1-based; 0 and out-of-range values are unknown */
	if (spd < 1 || spd > ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[spd - 1];
}
852
3373efd8 853void ata_dev_disable(struct ata_device *dev)
0b8efb0a 854{
09d7f9b0 855 if (ata_dev_enabled(dev)) {
9af5c9c9 856 if (ata_msg_drv(dev->link->ap))
09d7f9b0 857 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
562f0c2d 858 ata_acpi_on_disable(dev);
4ae72a1e
TH
859 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
860 ATA_DNXFER_QUIET);
0b8efb0a
TH
861 dev->class++;
862 }
863}
864
ca77329f
KCA
865static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
866{
867 struct ata_link *link = dev->link;
868 struct ata_port *ap = link->ap;
869 u32 scontrol;
870 unsigned int err_mask;
871 int rc;
872
873 /*
874 * disallow DIPM for drivers which haven't set
875 * ATA_FLAG_IPM. This is because when DIPM is enabled,
876 * phy ready will be set in the interrupt status on
877 * state changes, which will cause some drivers to
878 * think there are errors - additionally drivers will
879 * need to disable hot plug.
880 */
881 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
882 ap->pm_policy = NOT_AVAILABLE;
883 return -EINVAL;
884 }
885
886 /*
887 * For DIPM, we will only enable it for the
888 * min_power setting.
889 *
890 * Why? Because Disks are too stupid to know that
891 * If the host rejects a request to go to SLUMBER
892 * they should retry at PARTIAL, and instead it
893 * just would give up. So, for medium_power to
894 * work at all, we need to only allow HIPM.
895 */
896 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
897 if (rc)
898 return rc;
899
900 switch (policy) {
901 case MIN_POWER:
902 /* no restrictions on IPM transitions */
903 scontrol &= ~(0x3 << 8);
904 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
905 if (rc)
906 return rc;
907
908 /* enable DIPM */
909 if (dev->flags & ATA_DFLAG_DIPM)
910 err_mask = ata_dev_set_feature(dev,
911 SETFEATURES_SATA_ENABLE, SATA_DIPM);
912 break;
913 case MEDIUM_POWER:
914 /* allow IPM to PARTIAL */
915 scontrol &= ~(0x1 << 8);
916 scontrol |= (0x2 << 8);
917 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
918 if (rc)
919 return rc;
920
f5456b63
KCA
921 /*
922 * we don't have to disable DIPM since IPM flags
923 * disallow transitions to SLUMBER, which effectively
924 * disable DIPM if it does not support PARTIAL
925 */
ca77329f
KCA
926 break;
927 case NOT_AVAILABLE:
928 case MAX_PERFORMANCE:
929 /* disable all IPM transitions */
930 scontrol |= (0x3 << 8);
931 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
932 if (rc)
933 return rc;
934
f5456b63
KCA
935 /*
936 * we don't have to disable DIPM since IPM flags
937 * disallow all transitions which effectively
938 * disable DIPM anyway.
939 */
ca77329f
KCA
940 break;
941 }
942
943 /* FIXME: handle SET FEATURES failure */
944 (void) err_mask;
945
946 return 0;
947}
948
949/**
950 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
951 * @dev: device to enable power management
952 * @policy: the link power management policy
ca77329f
KCA
953 *
954 * Enable SATA Interface power management. This will enable
955 * Device Interface Power Management (DIPM) for min_power
956 * policy, and then call driver specific callbacks for
957 * enabling Host Initiated Power management.
958 *
959 * Locking: Caller.
960 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
961 */
962void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
963{
964 int rc = 0;
965 struct ata_port *ap = dev->link->ap;
966
967 /* set HIPM first, then DIPM */
968 if (ap->ops->enable_pm)
969 rc = ap->ops->enable_pm(ap, policy);
970 if (rc)
971 goto enable_pm_out;
972 rc = ata_dev_set_dipm(dev, policy);
973
974enable_pm_out:
975 if (rc)
976 ap->pm_policy = MAX_PERFORMANCE;
977 else
978 ap->pm_policy = policy;
979 return /* rc */; /* hopefully we can use 'rc' eventually */
980}
981
#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy,  call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* force DIPM off, then let the driver undo HIPM */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
ca77329f
KCA
1004
1005void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
1006{
1007 ap->pm_policy = policy;
3ec25ebd 1008 ap->link.eh_info.action |= ATA_EH_LPM;
ca77329f
KCA
1009 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
1010 ata_port_schedule_eh(ap);
1011}
1012
1992a5ed 1013#ifdef CONFIG_PM
ca77329f
KCA
1014static void ata_lpm_enable(struct ata_host *host)
1015{
1016 struct ata_link *link;
1017 struct ata_port *ap;
1018 struct ata_device *dev;
1019 int i;
1020
1021 for (i = 0; i < host->n_ports; i++) {
1022 ap = host->ports[i];
1023 ata_port_for_each_link(link, ap) {
1024 ata_link_for_each_dev(dev, link)
1025 ata_dev_disable_pm(dev);
1026 }
1027 }
1028}
1029
1030static void ata_lpm_disable(struct ata_host *host)
1031{
1032 int i;
1033
1034 for (i = 0; i < host->n_ports; i++) {
1035 struct ata_port *ap = host->ports[i];
1036 ata_lpm_schedule(ap, ap->pm_policy);
1037 }
1038}
1992a5ed 1039#endif /* CONFIG_PM */
ca77329f
KCA
1040
1041
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* Write alternating 0x55/0xaa patterns into two scratch
	 * registers; a present device latches the last value written. */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	/* read back; only a real device echoes the final pattern */
	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
1084
1da177e4
LT
1085/**
1086 * ata_dev_classify - determine device type based on ATA-spec signature
1087 * @tf: ATA taskfile register set for device to be identified
1088 *
1089 * Determine from taskfile register contents whether a device is
1090 * ATA or ATAPI, as per "Signature and persistence" section
1091 * of ATA/PI spec (volume 1, sect 5.14).
1092 *
1093 * LOCKING:
1094 * None.
1095 *
1096 * RETURNS:
633273a3
TH
1097 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
1098 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 1099 */
057ace5e 1100unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
1101{
1102 /* Apple's open source Darwin code hints that some devices only
1103 * put a proper signature into the LBA mid/high registers,
1104 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
1105 *
1106 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
1107 * signatures for ATA and ATAPI devices attached on SerialATA,
1108 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
1109 * spec has never mentioned about using different signatures
1110 * for ATA/ATAPI devices. Then, Serial ATA II: Port
1111 * Multiplier specification began to use 0x69/0x96 to identify
1112 * port multpliers and 0x3c/0xc3 to identify SEMB device.
1113 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
1114 * 0x69/0x96 shortly and described them as reserved for
1115 * SerialATA.
1116 *
1117 * We follow the current spec and consider that 0x69/0x96
1118 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 1119 */
633273a3 1120 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
1121 DPRINTK("found ATA device by sig\n");
1122 return ATA_DEV_ATA;
1123 }
1124
633273a3 1125 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
1126 DPRINTK("found ATAPI device by sig\n");
1127 return ATA_DEV_ATAPI;
1128 }
1129
633273a3
TH
1130 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
1131 DPRINTK("found PMP device by sig\n");
1132 return ATA_DEV_PMP;
1133 }
1134
1135 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 1136 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
1137 return ATA_DEV_SEMB_UNSUP; /* not yet */
1138 }
1139
1da177e4
LT
1140 DPRINTK("unknown device\n");
1141 return ATA_DEV_UNKNOWN;
1142}
1143
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	/* the diagnostic result lands in the feature/error register */
	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: continue and warn later */
	if (err == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* 0x01: device passed diagnostics - do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* 0x81 on device 0: device 0 OK, device 1 failed
		 * (per ATA execute-device-diagnostic codes) */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
1212
1213/**
6a62a04d 1214 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
1215 * @id: IDENTIFY DEVICE results we will examine
1216 * @s: string into which data is output
1217 * @ofs: offset into identify device page
1218 * @len: length of string to return. must be an even number.
1219 *
1220 * The strings in the IDENTIFY DEVICE page are broken up into
1221 * 16-bit chunks. Run through the string, and output each
1222 * 8-bit chunk linearly, regardless of platform.
1223 *
1224 * LOCKING:
1225 * caller.
1226 */
1227
6a62a04d
TH
1228void ata_id_string(const u16 *id, unsigned char *s,
1229 unsigned int ofs, unsigned int len)
1da177e4
LT
1230{
1231 unsigned int c;
1232
1233 while (len > 0) {
1234 c = id[ofs] >> 8;
1235 *s = c;
1236 s++;
1237
1238 c = id[ofs] & 0xff;
1239 *s = c;
1240 s++;
1241
1242 ofs++;
1243 len -= 2;
1244 }
1245}
1246
0e949ff3 1247/**
6a62a04d 1248 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1249 * @id: IDENTIFY DEVICE results we will examine
1250 * @s: string into which data is output
1251 * @ofs: offset into identify device page
1252 * @len: length of string to return. must be an odd number.
1253 *
6a62a04d 1254 * This function is identical to ata_id_string except that it
0e949ff3
TH
1255 * trims trailing spaces and terminates the resulting string with
1256 * null. @len must be actual maximum length (even number) + 1.
1257 *
1258 * LOCKING:
1259 * caller.
1260 */
6a62a04d
TH
1261void ata_id_c_string(const u16 *id, unsigned char *s,
1262 unsigned int ofs, unsigned int len)
0e949ff3
TH
1263{
1264 unsigned char *p;
1265
1266 WARN_ON(!(len & 1));
1267
6a62a04d 1268 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1269
1270 p = s + strnlen(s, len - 1);
1271 while (p > s && p[-1] == ' ')
1272 p--;
1273 *p = '\0';
1274}
0baab86b 1275
db6f8759
TH
1276static u64 ata_id_n_sectors(const u16 *id)
1277{
1278 if (ata_id_has_lba(id)) {
1279 if (ata_id_has_lba48(id))
1280 return ata_id_u64(id, 100);
1281 else
1282 return ata_id_u32(id, 60);
1283 } else {
1284 if (ata_id_current_chs_valid(id))
1285 return ata_id_u32(id, 57);
1286 else
1287 return id[1] * id[3] * id[6];
1288 }
1289}
1290
1e999736
AC
1291static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1292{
1293 u64 sectors = 0;
1294
1295 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1296 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1297 sectors |= (tf->hob_lbal & 0xff) << 24;
1298 sectors |= (tf->lbah & 0xff) << 16;
1299 sectors |= (tf->lbam & 0xff) << 8;
1300 sectors |= (tf->lbal & 0xff);
1301
1302 return ++sectors;
1303}
1304
1305static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1306{
1307 u64 sectors = 0;
1308
1309 sectors |= (tf->device & 0x0f) << 24;
1310 sectors |= (tf->lbah & 0xff) << 16;
1311 sectors |= (tf->lbam & 0xff) << 8;
1312 sectors |= (tf->lbal & 0xff);
1313
1314 return ++sectors;
1315}
1316
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* device-aborted command is reported distinctly so the
		 * caller can decide to continue */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result comes back in the address registers */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	/* quirky devices flagged ATA_HORKAGE_HPA_SIZE report one sector
	 * too many; compensate */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1366
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the last addressable LBA, not a count */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* high-order bytes go into the HOB registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 27:24 live in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1423
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? only ATA disks with LBA + HPA enabled
	 * and no known-broken-HPA quirk */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If device aborted the command or HPA isn't going to
		 * be unlocked, skip HPA resizing.
		 */
		if (rc == -EACCES || !ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, skipping HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  either no hidden area or unlocking disabled */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data so dev->id reflects the new size */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1519
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty: for controllers where device selection
	 * is unnecessary or handled by the hardware */
}
1535
0baab86b 1536
1da177e4
LT
1537/**
1538 * ata_std_dev_select - Select device 0/1 on ATA bus
1539 * @ap: ATA channel to manipulate
1540 * @device: ATA device (numbered from zero) to select
1541 *
1542 * Use the method defined in the ATA specification to
1543 * make either device 0, or device 1, active on the
0baab86b
EF
1544 * ATA channel. Works with both PIO and MMIO.
1545 *
1546 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1547 *
1548 * LOCKING:
1549 * caller.
1550 */
1551
2dcb407e 1552void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1553{
1554 u8 tmp;
1555
1556 if (device == 0)
1557 tmp = ATA_DEVICE_OBS;
1558 else
1559 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1560
0d5ff566 1561 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1562 ata_pause(ap); /* needed; also flushes, for mmio */
1563}
1564
/**
 *	ata_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *	@wait: non-zero to wait for Status register BSY bit to clear
 *	@can_sleep: non-zero if context allows sleeping
 *
 *	Use the method defined in the ATA specification to
 *	make either device 0, or device 1, active on the
 *	ATA channel.
 *
 *	This is a high-level version of ata_std_dev_select(),
 *	which additionally provides the services of inserting
 *	the proper pauses and status polling, where needed.
 *
 *	LOCKING:
 *	caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	if (ata_msg_probe(ap))
		ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
				"device %u, wait %u\n", device, wait);

	/* wait for the channel to go idle both before and after the
	 * actual selection */
	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		/* 150ms settle delay for ATAPI devices, applied only
		 * when the context allows sleeping — presumably to let
		 * the device respond after selection; confirm against
		 * callers */
		if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}
1602
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* capability/validity and PIO/DMA mode words */
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* version and command-set support words */
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* UDMA modes and hardware reset result */
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1641
cb95d562
TH
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		/* word 64 advertises PIO3/PIO4; modes 0-2 are always
		 * implied (the 0x7 bits) */
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		/* CFA advanced modes map onto PIO5/PIO6 and MWDMA3/4 */
		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* word 53 bit 2 validates the UDMA word */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1710
/**
 *	ata_pio_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@data: data for the port_task function to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule the port's port_task for execution after @delay
 *	jiffies, with @data stashed in @ap->port_task_data.  There is
 *	one port_task per port and it's the user(low level driver)'s
 *	responsibility to make sure that only one task is active at any
 *	given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_pio_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
static void ata_pio_queue_task(struct ata_port *ap, void *data,
			       unsigned long delay)
{
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1738
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancels a pending port_task and waits out a running one */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __func__);
}
1758
7102d230 1759static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1760{
77853bf2 1761 struct completion *waiting = qc->private_data;
a2a7a662 1762
a2a7a662 1763 complete(waiting);
a2a7a662
TH
1764}
1765
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save the in-flight command state and clear it so the
	 * internal command runs alone; restored before returning */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		/* total transfer length is the sum of the sg entries */
		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		/* timed out */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					"qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* AC_ERR_OTHER is only a placeholder; drop it once a
		 * more specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the command state saved above */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1947
2432697b 1948/**
33480a0e 1949 * ata_exec_internal - execute libata internal command
2432697b
TH
1950 * @dev: Device to which the command is sent
1951 * @tf: Taskfile registers for the command and the result
1952 * @cdb: CDB for packet command
1953 * @dma_dir: Data tranfer direction of the command
1954 * @buf: Data buffer of the command
1955 * @buflen: Length of data buffer
2b789108 1956 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1957 *
1958 * Wrapper around ata_exec_internal_sg() which takes simple
1959 * buffer instead of sg list.
1960 *
1961 * LOCKING:
1962 * None. Should be called with kernel context, might sleep.
1963 *
1964 * RETURNS:
1965 * Zero on success, AC_ERR_* mask on failure
1966 */
1967unsigned ata_exec_internal(struct ata_device *dev,
1968 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1969 int dma_dir, void *buf, unsigned int buflen,
1970 unsigned long timeout)
2432697b 1971{
33480a0e
TH
1972 struct scatterlist *psg = NULL, sg;
1973 unsigned int n_elem = 0;
2432697b 1974
33480a0e
TH
1975 if (dma_dir != DMA_NONE) {
1976 WARN_ON(!buf);
1977 sg_init_one(&sg, buf, buflen);
1978 psg = &sg;
1979 n_elem++;
1980 }
2432697b 1981
2b789108
TH
1982 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1983 timeout);
2432697b
TH
1984}
1985
977e6b9f
TH
1986/**
1987 * ata_do_simple_cmd - execute simple internal command
1988 * @dev: Device to which the command is sent
1989 * @cmd: Opcode to execute
1990 *
1991 * Execute a 'simple' command, that only consists of the opcode
1992 * 'cmd' itself, without filling any other registers
1993 *
1994 * LOCKING:
1995 * Kernel thread context (may sleep).
1996 *
1997 * RETURNS:
1998 * Zero on success, AC_ERR_* mask on failure
e58eb583 1999 */
77b08fb5 2000unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
2001{
2002 struct ata_taskfile tf;
e58eb583
TH
2003
2004 ata_tf_init(dev, &tf);
2005
2006 tf.command = cmd;
2007 tf.flags |= ATA_TFLAG_DEVICE;
2008 tf.protocol = ATA_PROT_NODATA;
2009
2b789108 2010 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
2011}
2012
1bc4ccff
AC
2013/**
2014 * ata_pio_need_iordy - check if iordy needed
2015 * @adev: ATA device
2016 *
2017 * Check if the current speed of the device requires IORDY. Used
2018 * by various controllers for chip configuration.
2019 */
a617c09f 2020
1bc4ccff
AC
2021unsigned int ata_pio_need_iordy(const struct ata_device *adev)
2022{
432729f0
AC
2023 /* Controller doesn't support IORDY. Probably a pointless check
2024 as the caller should know this */
9af5c9c9 2025 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 2026 return 0;
432729f0
AC
2027 /* PIO3 and higher it is mandatory */
2028 if (adev->pio_mode > XFER_PIO_2)
2029 return 1;
2030 /* We turn it on when possible */
2031 if (ata_id_has_iordy(adev->id))
1bc4ccff 2032 return 1;
432729f0
AC
2033 return 0;
2034}
2e9edbf8 2035
432729f0
AC
2036/**
2037 * ata_pio_mask_no_iordy - Return the non IORDY mask
2038 * @adev: ATA device
2039 *
2040 * Compute the highest mode possible if we are not using iordy. Return
2041 * -1 if no iordy mode is available.
2042 */
a617c09f 2043
432729f0
AC
2044static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
2045{
1bc4ccff 2046 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 2047 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 2048 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
2049 /* Is the speed faster than the drive allows non IORDY ? */
2050 if (pio) {
2051 /* This is cycle times not frequency - watch the logic! */
2052 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
2053 return 3 << ATA_SHIFT_PIO;
2054 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
2055 }
2056 }
432729f0 2057 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
2058}
2059
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	/* may_fallback: one shot at the "other" IDENTIFY flavor;
	 * tried_spinup: SETFEATURES_SPINUP is issued at most once. */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	/* the taskfile is rebuilt on every retry as @class may change */
	ata_tf_init(dev, &tf);

	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			ata_dev_printk(dev, KERN_DEBUG,
				       "NODEV after polling detection\n");
			return -ENOENT;
		}

		if ((err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			/* Device or controller might have reported
			 * the wrong device class.  Give a shot at the
			 * other IDENTIFY if the current one is
			 * aborted by the device.
			 */
			if (may_fallback) {
				may_fallback = 0;

				if (class == ATA_DEV_ATA)
					class = ATA_DEV_ATAPI;
				else
					class = ATA_DEV_ATA;
				goto retry;
			}

			/* Control reaches here iff the device aborted
			 * both flavors of IDENTIFYs which happens
			 * sometimes with phantom devices.
			 */
			ata_dev_printk(dev, KERN_DEBUG,
				       "both IDENTIFYs aborted, assuming NODEV\n");
			return -ENOENT;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the ID page's own type bits must agree with
	 * the class we asked for */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* id[2] values 0x37c8/0x738c signal "powered up in standby";
	 * see the SET FEATURES spin-up subcommand description */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			/* id[3] = heads, id[6] = sectors per track */
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
2241
3373efd8 2242static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2243{
9af5c9c9
TH
2244 struct ata_port *ap = dev->link->ap;
2245 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2246}
2247
a6e6ce8e
TH
2248static void ata_dev_config_ncq(struct ata_device *dev,
2249 char *desc, size_t desc_sz)
2250{
9af5c9c9 2251 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2252 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2253
2254 if (!ata_id_has_ncq(dev->id)) {
2255 desc[0] = '\0';
2256 return;
2257 }
75683fe7 2258 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2259 snprintf(desc, desc_sz, "NCQ (not used)");
2260 return;
2261 }
a6e6ce8e 2262 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2263 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2264 dev->flags |= ATA_DFLAG_NCQ;
2265 }
2266
2267 if (hdepth >= ddepth)
2268 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2269 else
2270 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2271}
2272
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned long xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* Nothing to do for a device that isn't attached */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __func__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __func__);

	/* set horkage: blacklist quirks first, then libata.force overrides */
	dev->horkage |= ata_dev_blacklisted(dev);
	ata_force_horkage(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __func__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters so reconfiguration
	 * always starts from a clean slate */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			/* NOTE(review): "accessable" below is misspelled in
			 * this user-visible message (should be "accessible") —
			 * kept byte-identical here. */
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else {
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
			/* Warn the user if the device has TPM extensions */
			if (ata_id_has_tpm(id))
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
		}

		dev->n_sectors = ata_id_n_sectors(id);

		/* id word 59: current multi-sector setting, valid bit 0x100 */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH EXT only matters past the 28-bit
				 * boundary */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads    = id[55];
				dev->sectors  = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf,	modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		const char *dma_dir_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* DMADIR can be forced by module param or advertised
		 * by the device */
		if (atapi_dmadir || atapi_id_dmadir(dev->id)) {
			dev->flags |= ATA_DFLAG_DMADIR;
			dma_dir_string = ", DMADIR";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string,
				       dma_dir_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* link power management capability flags, unless quirked off */
	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	/* Limit PATA drive on SATA cable bridge transfers to udma5,
	   200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* ATAPI tape drives get a larger transfer limit and are prone
	 * to reporting stuck error bits */
	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE)) {
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;
		dev->horkage |= ATA_HORKAGE_STUCK_ERR;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLDD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot. Do this after the dev_config call as some controllers
		   with buggy firmware may want to avoid reporting false device
		   bugs */

		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __func__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __func__);
	return rc;
}
2567
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 always.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2580
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 always.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2593
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK always.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2605
/**
 *	ata_cable_ignore	-	return ignored PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which don't use cable type to limit
 *	transfer mode.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_IGN always.
 */
int ata_cable_ignore(struct ata_port *ap)
{
	return ATA_CBL_PATA_IGN;
}
2617
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA always.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2629
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];	/* per-device retry budget */
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		/* class is re-established below from the saved copy,
		 * gated by the remaining retry budget */
		dev->class = ATA_DEV_UNKNOWN;
	}

	/* phy_reset may have marked the port disabled; re-enable it */
	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev_reverse(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device survived probing */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* @dev is the device whose step failed; charge its budget */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2778
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* Clearing DISABLED is sufficient: probing paths check this
	 * flag before touching the port (see ata_bus_probe()). */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2794
3be680b7
TH
2795/**
2796 * sata_print_link_status - Print SATA link status
936fd732 2797 * @link: SATA link to printk link status about
3be680b7
TH
2798 *
2799 * This function prints link speed and status of a SATA link.
2800 *
2801 * LOCKING:
2802 * None.
2803 */
936fd732 2804void sata_print_link_status(struct ata_link *link)
3be680b7 2805{
6d5f9732 2806 u32 sstatus, scontrol, tmp;
3be680b7 2807
936fd732 2808 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2809 return;
936fd732 2810 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2811
936fd732 2812 if (ata_link_online(link)) {
3be680b7 2813 tmp = (sstatus >> 4) & 0xf;
936fd732 2814 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2815 "SATA link up %s (SStatus %X SControl %X)\n",
2816 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2817 } else {
936fd732 2818 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2819 "SATA link down (SStatus %X SControl %X)\n",
2820 sstatus, scontrol);
3be680b7
TH
2821 }
2822}
2823
ebdfca6e
AC
2824/**
2825 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2826 * @adev: device
2827 *
2828 * Obtain the other device on the same cable, or if none is
2829 * present NULL is returned
2830 */
2e9edbf8 2831
3373efd8 2832struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2833{
9af5c9c9
TH
2834 struct ata_link *link = adev->link;
2835 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2836 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2837 return NULL;
2838 return pair;
2839}
2840
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* mark both possible devices on the port absent ... */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	/* ... and flag the whole port disabled */
	ap->flags |= ATA_FLAG_DISABLED;
}
2860
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;	/* SStatus bits 7:4 */
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	/* mask of 1 (or 0) means we're already at the lowest speed */
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2919
/* Compute the SPD limit field for SControl from @link->sata_spd_limit,
 * update *@scontrol in place, and report whether the programmed value
 * actually changed (i.e. whether a hardreset is needed to apply it).
 */
static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
{
	struct ata_link *host_link = &link->ap->link;
	u32 limit, target, spd;

	limit = link->sata_spd_limit;

	/* Don't configure downstream link faster than upstream link.
	 * It doesn't speed up anything and some PMPs choke on such
	 * configuration.
	 */
	if (!ata_is_host_link(link) && host_link->sata_spd)
		limit &= (1 << host_link->sata_spd) - 1;

	/* UINT_MAX means "no limit": program 0 (no speed restriction) */
	if (limit == UINT_MAX)
		target = 0;
	else
		target = fls(limit);

	/* current SPD limit lives in SControl bits 7:4 */
	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);

	return spd != target;
}
2944
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* if SControl can't even be read, assume a reset is needed */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}
2969
2970/**
3c567b7d 2971 * sata_set_spd - set SATA spd according to spd limit
936fd732 2972 * @link: Link to set SATA spd for
1c3fae4d 2973 *
936fd732 2974 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2975 *
2976 * LOCKING:
2977 * Inherited from caller.
2978 *
2979 * RETURNS:
2980 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2981 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2982 */
936fd732 2983int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2984{
2985 u32 scontrol;
81952c54 2986 int rc;
1c3fae4d 2987
936fd732 2988 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2989 return rc;
1c3fae4d 2990
936fd732 2991 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2992 return 0;
2993
936fd732 2994 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2995 return rc;
2996
1c3fae4d
TH
2997 return 1;
2998}
2999
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 *
 * Columns: mode, setup, act8b, rec8b, cyc8b, active, recover,
 * cycle, udma (field order matches struct ata_timing).
 */

static const struct ata_timing ata_timing[] = {
/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },

	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },

	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },

	{ 0xFF }	/* terminator */
};
3043
2dcb407e
JG
/* Round (v) up to a whole number of (unit)s; assumes v > 0. */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* Like ENOUGH, but maps 0 to 0 so unused timing fields stay zero. */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
452503f9
AC
3046
/* Quantize timing @t into clock counts in @q.  @T is the PIO/MWDMA
 * clock period and @UT the UDMA clock period; the nanosecond fields
 * are scaled by 1000 first (presumably @T/@UT are in picoseconds —
 * TODO confirm units against callers).  Zero fields remain zero,
 * non-zero fields round up (see EZ/ENOUGH above).  @q may alias @t.
 */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup = EZ(t->setup * 1000, T);
	q->act8b = EZ(t->act8b * 1000, T);
	q->rec8b = EZ(t->rec8b * 1000, T);
	q->cyc8b = EZ(t->cyc8b * 1000, T);
	q->active = EZ(t->active * 1000, T);
	q->recover = EZ(t->recover * 1000, T);
	q->cycle = EZ(t->cycle * 1000, T);
	q->udma = EZ(t->udma * 1000, UT);
}
3058
/* Merge timings @a and @b into @m, taking for each field selected by
 * the ATA_TIMING_* bits in @what the slower (larger) of the two
 * values.  Fields not selected in @what are left untouched in @m.
 */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
	if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
	if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
	if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
	if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
}
3071
6357357c 3072const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 3073{
70cd071e
TH
3074 const struct ata_timing *t = ata_timing;
3075
3076 while (xfer_mode > t->mode)
3077 t++;
452503f9 3078
70cd071e
TH
3079 if (xfer_mode == t->mode)
3080 return t;
3081 return NULL;
452503f9
AC
3082}
3083
/* Compute the final timing for @adev at transfer mode @speed into @t,
 * quantized to bus clock periods @T (PIO/MWDMA) and @UT (UDMA).
 * Starts from the standard table entry, widens it with the drive's
 * own EIDE timing words when valid, folds in the PIO timing for DMA
 * modes, and finally stretches active/recovery to fill the cycle.
 * Returns 0 on success, -EINVAL if @speed has no table entry.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			/* word 67 = PIO w/o IORDY, word 68 = PIO w/ IORDY */
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the slower of table vs drive-reported cycle times */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse once with the device's PIO mode and merge */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
3154
a0f79b92
TH
/**
 *	ata_timing_cycle2mode - find xfer mode for the specified cycle duration
 *	@xfer_shift: ATA_SHIFT_* value for transfer type to examine.
 *	@cycle: cycle duration in ns
 *
 *	Return matching xfer mode for @cycle.  The returned mode is of
 *	the transfer type specified by @xfer_shift.  If @cycle is too
 *	slow for @xfer_shift, 0xff is returned.  If @cycle is faster
 *	than the fastest known mode, the fastest mode is returned.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Matching xfer_mode, 0xff if no match found.
 */
u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle)
{
	u8 base_mode = 0xff, last_mode = 0xff;
	const struct ata_xfer_ent *ent;
	const struct ata_timing *t;

	/* find the base mode (slowest) for this transfer type */
	for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
		if (ent->shift == xfer_shift)
			base_mode = ent->base;

	/* walk the timing table upward from base_mode while the entries
	 * still belong to @xfer_shift; stop at the first mode whose
	 * cycle time is shorter than requested.
	 */
	for (t = ata_timing_find_mode(base_mode);
	     t && ata_xfer_mode2shift(t->mode) == xfer_shift; t++) {
		unsigned short this_cycle;

		switch (xfer_shift) {
		case ATA_SHIFT_PIO:
		case ATA_SHIFT_MWDMA:
			this_cycle = t->cycle;
			break;
		case ATA_SHIFT_UDMA:
			this_cycle = t->udma;
			break;
		default:
			return 0xff;
		}

		if (cycle > this_cycle)
			break;

		/* this mode is still fast enough; remember it */
		last_mode = t->mode;
	}

	return last_mode;
}
3205
cf176e1a
TH
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned long orig_mask, xfer_mask;
	unsigned long pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a modifier bit, not a selector value */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest DMA mode, UDMA before MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* limit UDMA to modes legal on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fallthrough - FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to leave the device without PIO, or to "change" nothing */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3294
/* Issue SET FEATURES - XFER MODE for @dev's currently selected
 * xfer_mode/xfer_shift, revalidate the device, and decide whether a
 * device-reported error can be safely ignored (several classes of
 * quirky hardware object to the command yet work fine).
 * Returns 0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	const char *dev_err_whine = "";
	int ign_dev_err = 0;
	unsigned int err_mask;
	int rc;

	/* ATA_DFLAG_PIO tracks whether the final mode is a PIO mode */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* any error other than a device-reported one is fatal */
	if (err_mask & ~AC_ERR_DEV)
		goto fail;

	/* revalidate */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		ign_dev_err = 1;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		ign_dev_err = 1;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		ign_dev_err = 1;

	/* if the device is actually configured correctly, ignore dev err */
	if (dev->xfer_mode == ata_xfer_mask2mode(ata_id_xfermask(dev->id)))
		ign_dev_err = 1;

	if (err_mask & AC_ERR_DEV) {
		if (!ign_dev_err)
			goto fail;
		else
			dev_err_whine = " (device error ignored)";
	}

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s%s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)),
		       dev_err_whine);

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
		       "(err_mask=0x%x)\n", err_mask);
	return -EIO;
}
3361
1da177e4 3362/**
04351821 3363 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3364 * @link: link on which timings will be programmed
1967b7ff 3365 * @r_failed_dev: out parameter for failed device
1da177e4 3366 *
04351821
AC
3367 * Standard implementation of the function used to tune and set
3368 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3369 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3370 * returned in @r_failed_dev.
780a87f7 3371 *
1da177e4 3372 * LOCKING:
0cba632b 3373 * PCI/etc. bus probe sem.
e82cbdb9
TH
3374 *
3375 * RETURNS:
3376 * 0 on success, negative errno otherwise
1da177e4 3377 */
04351821 3378
0260731f 3379int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3380{
0260731f 3381 struct ata_port *ap = link->ap;
e8e0619f 3382 struct ata_device *dev;
f58229f8 3383 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3384
a6d5a51c 3385 /* step 1: calculate xfer_mask */
f58229f8 3386 ata_link_for_each_dev(dev, link) {
7dc951ae 3387 unsigned long pio_mask, dma_mask;
b3a70601 3388 unsigned int mode_mask;
a6d5a51c 3389
e1211e3f 3390 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3391 continue;
3392
b3a70601
AC
3393 mode_mask = ATA_DMA_MASK_ATA;
3394 if (dev->class == ATA_DEV_ATAPI)
3395 mode_mask = ATA_DMA_MASK_ATAPI;
3396 else if (ata_id_is_cfa(dev->id))
3397 mode_mask = ATA_DMA_MASK_CFA;
3398
3373efd8 3399 ata_dev_xfermask(dev);
33267325 3400 ata_force_xfermask(dev);
1da177e4 3401
acf356b1
TH
3402 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3403 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3404
3405 if (libata_dma_mask & mode_mask)
3406 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3407 else
3408 dma_mask = 0;
3409
acf356b1
TH
3410 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3411 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3412
4f65977d 3413 found = 1;
70cd071e 3414 if (dev->dma_mode != 0xff)
5444a6f4 3415 used_dma = 1;
a6d5a51c 3416 }
4f65977d 3417 if (!found)
e82cbdb9 3418 goto out;
a6d5a51c
TH
3419
3420 /* step 2: always set host PIO timings */
f58229f8 3421 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3422 if (!ata_dev_enabled(dev))
3423 continue;
3424
70cd071e 3425 if (dev->pio_mode == 0xff) {
f15a1daf 3426 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3427 rc = -EINVAL;
e82cbdb9 3428 goto out;
e8e0619f
TH
3429 }
3430
3431 dev->xfer_mode = dev->pio_mode;
3432 dev->xfer_shift = ATA_SHIFT_PIO;
3433 if (ap->ops->set_piomode)
3434 ap->ops->set_piomode(ap, dev);
3435 }
1da177e4 3436
a6d5a51c 3437 /* step 3: set host DMA timings */
f58229f8 3438 ata_link_for_each_dev(dev, link) {
70cd071e 3439 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3440 continue;
3441
3442 dev->xfer_mode = dev->dma_mode;
3443 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3444 if (ap->ops->set_dmamode)
3445 ap->ops->set_dmamode(ap, dev);
3446 }
1da177e4
LT
3447
3448 /* step 4: update devices' xfer mode */
f58229f8 3449 ata_link_for_each_dev(dev, link) {
18d90deb 3450 /* don't update suspended devices' xfer mode */
9666f400 3451 if (!ata_dev_enabled(dev))
83206a29
TH
3452 continue;
3453
3373efd8 3454 rc = ata_dev_set_mode(dev);
5bbc53f4 3455 if (rc)
e82cbdb9 3456 goto out;
83206a29 3457 }
1da177e4 3458
e8e0619f
TH
3459 /* Record simplex status. If we selected DMA then the other
3460 * host channels are not permitted to do so.
5444a6f4 3461 */
cca3974e 3462 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3463 ap->host->simplex_claimed = ap;
5444a6f4 3464
e82cbdb9
TH
3465 out:
3466 if (rc)
3467 *r_failed_dev = dev;
3468 return rc;
1da177e4
LT
3469}
3470
1fdffbce
JG
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the registers, then write the command register */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3490
1da177e4
LT
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies); warn after this long
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* short busy-wait first in case BSY clears quickly */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* 0xff means no device / port gone - stop polling in that case */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* keep polling up to the overall timeout */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3545
88ff6eaf
TH
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	/* never wait for 0xff past the caller's deadline */
	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 *
	 * Note that some PATA controllers (pata_ali) explode if
	 * status register is read more than once when there's no
	 * device attached.
	 */
	if (ap->flags & ATA_FLAG_SATA) {
		while (1) {
			u8 status = ata_chk_status(ap);

			if (status != 0xff || time_after(jiffies, deadline))
				return;

			msleep(50);
		}
	}
}
3600
d4b2bab4
TH
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* offline link + 0xff status means nothing is there */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once after 5s, but only if >3s of waiting remain */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3642
/* Wait for the devices in @devmask to come out of reset on @ap before
 * @deadline.  Returns 0 if all present devices became ready, -ENODEV
 * if only absence was detected, other -errno on hard failure.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			/* -ENODEV is remembered but not fatal */
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			/* signature values after a successful reset */
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3702
d4b2bab4
TH
/* Perform an ATA software reset (SRST via the device control register)
 * on @ap, then wait for the devices in @devmask until @deadline.
 * Returns 0 on success, -ENODEV if the bus floats at 0xFF (no device),
 * other -errno from ata_bus_post_reset() on failure.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3729
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 is there */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		/* -ENODEV just means nothing answered; not fatal here */
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	/* err == 0x81 indicates device 1 failed diagnostics */
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3817
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is min(@deadline, now + params[2]) */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 may be a stuck value - keep waiting
			 * until the deadline before accepting it */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3886
3887/**
936fd732
TH
3888 * sata_link_resume - resume SATA link
3889 * @link: ATA link to resume SATA
d7bb4cc7 3890 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3891 * @deadline: deadline jiffies for the operation
d7bb4cc7 3892 *
936fd732 3893 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3894 *
3895 * LOCKING:
3896 * Kernel thread context (may sleep)
3897 *
3898 * RETURNS:
3899 * 0 on success, -errno on failure.
3900 */
936fd732
TH
3901int sata_link_resume(struct ata_link *link, const unsigned long *params,
3902 unsigned long deadline)
d7bb4cc7
TH
3903{
3904 u32 scontrol;
81952c54
TH
3905 int rc;
3906
936fd732 3907 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3908 return rc;
7a7921e8 3909
852ee16a 3910 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3911
936fd732 3912 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3913 return rc;
7a7921e8 3914
d7bb4cc7
TH
3915 /* Some PHYs react badly if SStatus is pounded immediately
3916 * after resuming. Delay 200ms before debouncing.
3917 */
3918 msleep(200);
7a7921e8 3919
936fd732 3920 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3921}
3922
f5914a46
TH
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc && rc != -ENODEV) {
			/* escalate instead of failing - see above */
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3985
c2bd5804
TH
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link - nothing to reset, report empty */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	/* err == 0x81 indicates device 1 failed diagnostics */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
4045
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd. To be on the safe side, turn off phy during
		 * reconfiguration. This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4h: disable the interface, keep speed bits */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset: DET = 1h initiates COMRESET */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
4105
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST. Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty. Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
4173
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset. Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear) and the cached EH copy */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);
	link->eh_info.serror = 0;

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control, if the port exposes a control register */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
4219
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	/* extract model and serial strings from both ID pages */
	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}
4268
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* use the port's scratch sector buffer for the new ID page */
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	/* commit the fresh ID page only after the identity check */
	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
4301
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed — a silent capacity change
	 * means this is not the same device (or HPA changed under us)
	 */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4366
/* One blacklist entry: model number pattern, optional firmware revision
 * pattern (NULL matches any revision), and the horkage flags to apply.
 * Patterns support a trailing '*' wildcard (see strn_pattern_cmp()).
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B", 	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL, 		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B",	NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC",	NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A", 		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A", 	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },
	{ "ST3160023AS",	"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST310211A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* Devices which get the IVB wrong */
	{ "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202J", "SB01",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB00",	  ATA_HORKAGE_IVB, },
	{ "TSSTcorp CDDVDW SH-S202N", "SB01",	  ATA_HORKAGE_IVB, },

	/* End Marker */
	{ }
};
2e9edbf8 4453
/* Compare @name against @patt, where @patt may end with a single
 * trailing @wildchar acting as a prefix wildcard.  Returns 0 on match,
 * non-zero otherwise (strncmp()-style).  A wildcard anywhere but the
 * last position is treated literally.
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wild = strchr(patt, wildchar);
	int cmp_len;

	if (wild && wild[1] == '\0') {
		/* trailing wildcard: compare only the prefix before it */
		cmp_len = wild - patt;
	} else {
		/* exact match over the full name; an empty name only
		 * matches an empty pattern
		 */
		cmp_len = strlen(name);
		if (!cmp_len)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, cmp_len);
}
4476
75683fe7 4477static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4478{
8bfa79fc
TH
4479 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4480 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4481 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4482
8bfa79fc
TH
4483 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4484 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4485
6919a0a6 4486 while (ad->model_num) {
539cc7c7 4487 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4488 if (ad->model_rev == NULL)
4489 return ad->horkage;
539cc7c7 4490 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4491 return ad->horkage;
f4b15fef 4492 }
6919a0a6 4493 ad++;
f4b15fef 4494 }
1da177e4
LT
4495 return 0;
4496}
4497
6919a0a6
AC
4498static int ata_dma_blacklisted(const struct ata_device *dev)
4499{
4500 /* We don't support polling DMA.
4501 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4502 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4503 */
9af5c9c9 4504 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4505 (dev->flags & ATA_DFLAG_CDB_INTR))
4506 return 1;
75683fe7 4507 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4508}
4509
6bbfd53d
AC
4510/**
4511 * ata_is_40wire - check drive side detection
4512 * @dev: device
4513 *
4514 * Perform drive side detection decoding, allowing for device vendors
4515 * who can't follow the documentation.
4516 */
4517
4518static int ata_is_40wire(struct ata_device *dev)
4519{
4520 if (dev->horkage & ATA_HORKAGE_IVB)
4521 return ata_drive_40wire_relaxed(dev->id);
4522 return ata_drive_40wire(dev->id);
4523}
4524
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_is_40wire(dev) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4605
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* If we are using IORDY we must send the mode setting command */
	if (ata_pio_need_iordy(dev))
		tf.nsect = dev->xfer_mode;
	/* If the device has IORDY and the controller does not - turn it off */
	else if (ata_id_has_iordy(dev->id))
		tf.nsect = 0x01;
	else /* In the ancient relic department - skip all of this */
		return 0;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	/* @enable selects the SETFEATURES_SATA_ENABLE/DISABLE subcommand */
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4686
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4729
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	WARN_ON(sg == NULL);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* only unmap if something was actually mapped */
	if (qc->n_elem)
		dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}
4755
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each sg segment at 64K boundaries: one PRD
		 * entry per piece
		 */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[pi].addr = cpu_to_le32(addr);
			ap->prd[pi].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last PRD entry as end-of-table */
	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
b9a4197e 4803
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int si, pi;

	pi = 0;
	for_each_sg(qc->sg, sg, qc->n_elem, si) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[pi].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says.
				   Emit two 32K entries instead. */
				ap->prd[pi].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++pi].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[pi].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", pi, addr, len);

			pi++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the last PRD entry as end-of-table */
	ap->prd[pi - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4861
1da177e4
LT
4862/**
4863 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4864 * @qc: Metadata associated with taskfile to check
4865 *
780a87f7
JG
4866 * Allow low-level driver to filter ATA PACKET commands, returning
4867 * a status indicating whether or not it is OK to use DMA for the
4868 * supplied PACKET command.
4869 *
1da177e4 4870 * LOCKING:
cca3974e 4871 * spin_lock_irqsave(host lock)
0cba632b 4872 *
1da177e4
LT
4873 * RETURNS: 0 when ATAPI DMA can be used
4874 * nonzero otherwise
4875 */
4876int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4877{
4878 struct ata_port *ap = qc->ap;
b9a4197e
TH
4879
4880 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4881 * few ATAPI devices choke on such DMA requests.
4882 */
4883 if (unlikely(qc->nbytes & 15))
4884 return 1;
6f23a31d 4885
1da177e4 4886 if (ap->ops->check_atapi_dma)
b9a4197e 4887 return ap->ops->check_atapi_dma(qc);
1da177e4 4888
b9a4197e 4889 return 0;
1da177e4 4890}
b9a4197e 4891
31cc23b3
TH
4892/**
4893 * ata_std_qc_defer - Check whether a qc needs to be deferred
4894 * @qc: ATA command in question
4895 *
4896 * Non-NCQ commands cannot run with any other command, NCQ or
4897 * not. As upper layer only knows the queue depth, we are
4898 * responsible for maintaining exclusion. This function checks
4899 * whether a new command @qc can be issued.
4900 *
4901 * LOCKING:
4902 * spin_lock_irqsave(host lock)
4903 *
4904 * RETURNS:
4905 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4906 */
4907int ata_std_qc_defer(struct ata_queued_cmd *qc)
4908{
4909 struct ata_link *link = qc->dev->link;
4910
4911 if (qc->tf.protocol == ATA_PROT_NCQ) {
4912 if (!ata_tag_valid(link->active_tag))
4913 return 0;
4914 } else {
4915 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4916 return 0;
4917 }
4918
4919 return ATA_DEFER_LINK;
4920}
4921
1da177e4
LT
4922/**
4923 * ata_qc_prep - Prepare taskfile for submission
4924 * @qc: Metadata associated with taskfile to be prepared
4925 *
780a87f7
JG
4926 * Prepare ATA taskfile for submission.
4927 *
1da177e4 4928 * LOCKING:
cca3974e 4929 * spin_lock_irqsave(host lock)
1da177e4
LT
4930 */
4931void ata_qc_prep(struct ata_queued_cmd *qc)
4932{
4933 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4934 return;
4935
4936 ata_fill_sg(qc);
4937}
4938
d26fc955
AC
4939/**
4940 * ata_dumb_qc_prep - Prepare taskfile for submission
4941 * @qc: Metadata associated with taskfile to be prepared
4942 *
4943 * Prepare ATA taskfile for submission.
4944 *
4945 * LOCKING:
4946 * spin_lock_irqsave(host lock)
4947 */
4948void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4949{
4950 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4951 return;
4952
4953 ata_fill_sg_dumb(qc);
4954}
4955
e46834cd
BK
4956void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4957
0cba632b
JG
4958/**
4959 * ata_sg_init - Associate command with scatter-gather table.
4960 * @qc: Command to be associated
4961 * @sg: Scatter-gather table.
4962 * @n_elem: Number of elements in s/g table.
4963 *
4964 * Initialize the data-related elements of queued_cmd @qc
4965 * to point to a scatter-gather table @sg, containing @n_elem
4966 * elements.
4967 *
4968 * LOCKING:
cca3974e 4969 * spin_lock_irqsave(host lock)
0cba632b 4970 */
1da177e4
LT
4971void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4972 unsigned int n_elem)
4973{
ff2aeb1e 4974 qc->sg = sg;
1da177e4 4975 qc->n_elem = n_elem;
ff2aeb1e 4976 qc->cursg = qc->sg;
1da177e4
LT
4977}
4978
ff2aeb1e
TH
4979/**
4980 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4981 * @qc: Command with scatter-gather table to be mapped.
4982 *
4983 * DMA-map the scatter-gather table associated with queued_cmd @qc.
4984 *
4985 * LOCKING:
4986 * spin_lock_irqsave(host lock)
4987 *
4988 * RETURNS:
4989 * Zero on success, negative on error.
4990 *
4991 */
4992static int ata_sg_setup(struct ata_queued_cmd *qc)
4993{
4994 struct ata_port *ap = qc->ap;
dde20207 4995 unsigned int n_elem;
ff2aeb1e
TH
4996
4997 VPRINTK("ENTER, ata%u\n", ap->print_id);
4998
dde20207
JB
4999 n_elem = dma_map_sg(ap->dev, qc->sg, qc->n_elem, qc->dma_dir);
5000 if (n_elem < 1)
5001 return -1;
ff2aeb1e 5002
dde20207 5003 DPRINTK("%d sg elements mapped\n", n_elem);
1da177e4 5004
dde20207 5005 qc->n_elem = n_elem;
f92a2636 5006 qc->flags |= ATA_QCFLAG_DMAMAP;
1da177e4
LT
5007
5008 return 0;
5009}
5010
0baab86b 5011/**
c893a3ae 5012 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
5013 * @buf: Buffer to swap
5014 * @buf_words: Number of 16-bit words in buffer.
5015 *
5016 * Swap halves of 16-bit words if needed to convert from
5017 * little-endian byte order to native cpu byte order, or
5018 * vice-versa.
5019 *
5020 * LOCKING:
6f0ef4fa 5021 * Inherited from caller.
0baab86b 5022 */
1da177e4
LT
5023void swap_buf_le16(u16 *buf, unsigned int buf_words)
5024{
5025#ifdef __BIG_ENDIAN
5026 unsigned int i;
5027
5028 for (i = 0; i < buf_words; i++)
5029 buf[i] = le16_to_cpu(buf[i]);
5030#endif /* __BIG_ENDIAN */
5031}
5032
6ae4cfb5 5033/**
0d5ff566 5034 * ata_data_xfer - Transfer data by PIO
55dba312 5035 * @dev: device to target
6ae4cfb5
AL
5036 * @buf: data buffer
5037 * @buflen: buffer length
0affa456 5038 * @rw: read/write
6ae4cfb5
AL
5039 *
5040 * Transfer data from/to the device data register by PIO.
5041 *
5042 * LOCKING:
5043 * Inherited from caller.
55dba312
TH
5044 *
5045 * RETURNS:
5046 * Bytes consumed.
6ae4cfb5 5047 */
55dba312
TH
5048unsigned int ata_data_xfer(struct ata_device *dev, unsigned char *buf,
5049 unsigned int buflen, int rw)
1da177e4 5050{
55dba312
TH
5051 struct ata_port *ap = dev->link->ap;
5052 void __iomem *data_addr = ap->ioaddr.data_addr;
6ae4cfb5 5053 unsigned int words = buflen >> 1;
1da177e4 5054
6ae4cfb5 5055 /* Transfer multiple of 2 bytes */
55dba312
TH
5056 if (rw == READ)
5057 ioread16_rep(data_addr, buf, words);
1da177e4 5058 else
55dba312 5059 iowrite16_rep(data_addr, buf, words);
6ae4cfb5
AL
5060
5061 /* Transfer trailing 1 byte, if any. */
5062 if (unlikely(buflen & 0x01)) {
4ca4e439 5063 __le16 align_buf[1] = { 0 };
6ae4cfb5
AL
5064 unsigned char *trailing_buf = buf + buflen - 1;
5065
55dba312
TH
5066 if (rw == READ) {
5067 align_buf[0] = cpu_to_le16(ioread16(data_addr));
6ae4cfb5 5068 memcpy(trailing_buf, align_buf, 1);
55dba312
TH
5069 } else {
5070 memcpy(align_buf, trailing_buf, 1);
5071 iowrite16(le16_to_cpu(align_buf[0]), data_addr);
6ae4cfb5 5072 }
55dba312 5073 words++;
6ae4cfb5 5074 }
55dba312
TH
5075
5076 return words << 1;
1da177e4
LT
5077}
5078
75e99585 5079/**
0d5ff566 5080 * ata_data_xfer_noirq - Transfer data by PIO
55dba312 5081 * @dev: device to target
75e99585
AC
5082 * @buf: data buffer
5083 * @buflen: buffer length
0affa456 5084 * @rw: read/write
75e99585 5085 *
88574551 5086 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
5087 * transfer with interrupts disabled.
5088 *
5089 * LOCKING:
5090 * Inherited from caller.
55dba312
TH
5091 *
5092 * RETURNS:
5093 * Bytes consumed.
75e99585 5094 */
55dba312
TH
5095unsigned int ata_data_xfer_noirq(struct ata_device *dev, unsigned char *buf,
5096 unsigned int buflen, int rw)
75e99585
AC
5097{
5098 unsigned long flags;
55dba312
TH
5099 unsigned int consumed;
5100
75e99585 5101 local_irq_save(flags);
55dba312 5102 consumed = ata_data_xfer(dev, buf, buflen, rw);
75e99585 5103 local_irq_restore(flags);
55dba312
TH
5104
5105 return consumed;
75e99585
AC
5106}
5107
5108
6ae4cfb5 5109/**
5a5dbd18 5110 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
5111 * @qc: Command on going
5112 *
5a5dbd18 5113 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
5114 *
5115 * LOCKING:
5116 * Inherited from caller.
5117 */
5118
1da177e4
LT
5119static void ata_pio_sector(struct ata_queued_cmd *qc)
5120{
5121 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
1da177e4
LT
5122 struct ata_port *ap = qc->ap;
5123 struct page *page;
5124 unsigned int offset;
5125 unsigned char *buf;
5126
5a5dbd18 5127 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 5128 ap->hsm_task_state = HSM_ST_LAST;
1da177e4 5129
45711f1a 5130 page = sg_page(qc->cursg);
87260216 5131 offset = qc->cursg->offset + qc->cursg_ofs;
1da177e4
LT
5132
5133 /* get the current page and offset */
5134 page = nth_page(page, (offset >> PAGE_SHIFT));
5135 offset %= PAGE_SIZE;
5136
1da177e4
LT
5137 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5138
91b8b313
AL
5139 if (PageHighMem(page)) {
5140 unsigned long flags;
5141
a6b2c5d4 5142 /* FIXME: use a bounce buffer */
91b8b313
AL
5143 local_irq_save(flags);
5144 buf = kmap_atomic(page, KM_IRQ0);
083958d3 5145
91b8b313 5146 /* do the actual data transfer */
5a5dbd18 5147 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 5148
91b8b313
AL
5149 kunmap_atomic(buf, KM_IRQ0);
5150 local_irq_restore(flags);
5151 } else {
5152 buf = page_address(page);
5a5dbd18 5153 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 5154 }
1da177e4 5155
5a5dbd18
ML
5156 qc->curbytes += qc->sect_size;
5157 qc->cursg_ofs += qc->sect_size;
1da177e4 5158
87260216
JA
5159 if (qc->cursg_ofs == qc->cursg->length) {
5160 qc->cursg = sg_next(qc->cursg);
1da177e4
LT
5161 qc->cursg_ofs = 0;
5162 }
1da177e4 5163}
1da177e4 5164
07f6f7d0 5165/**
5a5dbd18 5166 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5167 * @qc: Command on going
5168 *
5a5dbd18 5169 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5170 * ATA device for the DRQ request.
5171 *
5172 * LOCKING:
5173 * Inherited from caller.
5174 */
1da177e4 5175
07f6f7d0
AL
5176static void ata_pio_sectors(struct ata_queued_cmd *qc)
5177{
5178 if (is_multi_taskfile(&qc->tf)) {
5179 /* READ/WRITE MULTIPLE */
5180 unsigned int nsect;
5181
587005de 5182 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5183
5a5dbd18 5184 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5185 qc->dev->multi_count);
07f6f7d0
AL
5186 while (nsect--)
5187 ata_pio_sector(qc);
5188 } else
5189 ata_pio_sector(qc);
4cc980b3
AL
5190
5191 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5192}
5193
c71c1857
AL
5194/**
5195 * atapi_send_cdb - Write CDB bytes to hardware
5196 * @ap: Port to which ATAPI device is attached.
5197 * @qc: Taskfile currently active
5198 *
5199 * When device has indicated its readiness to accept
5200 * a CDB, this function is called. Send the CDB.
5201 *
5202 * LOCKING:
5203 * caller.
5204 */
5205
5206static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
5207{
5208 /* send SCSI cdb */
5209 DPRINTK("send cdb\n");
db024d53 5210 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 5211
a6b2c5d4 5212 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
5213 ata_altstatus(ap); /* flush */
5214
5215 switch (qc->tf.protocol) {
0dc36888 5216 case ATAPI_PROT_PIO:
c71c1857
AL
5217 ap->hsm_task_state = HSM_ST;
5218 break;
0dc36888 5219 case ATAPI_PROT_NODATA:
c71c1857
AL
5220 ap->hsm_task_state = HSM_ST_LAST;
5221 break;
0dc36888 5222 case ATAPI_PROT_DMA:
c71c1857
AL
5223 ap->hsm_task_state = HSM_ST_LAST;
5224 /* initiate bmdma */
5225 ap->ops->bmdma_start(qc);
5226 break;
5227 }
1da177e4
LT
5228}
5229
6ae4cfb5
AL
5230/**
5231 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5232 * @qc: Command on going
5233 * @bytes: number of bytes
5234 *
5235 * Transfer Transfer data from/to the ATAPI device.
5236 *
5237 * LOCKING:
5238 * Inherited from caller.
5239 *
5240 */
140b5e59 5241static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
1da177e4 5242{
56c819df 5243 int rw = (qc->tf.flags & ATA_TFLAG_WRITE) ? WRITE : READ;
1da177e4 5244 struct ata_port *ap = qc->ap;
56c819df
TH
5245 struct ata_device *dev = qc->dev;
5246 struct ata_eh_info *ehi = &dev->link->eh_info;
140b5e59 5247 struct scatterlist *sg;
1da177e4
LT
5248 struct page *page;
5249 unsigned char *buf;
56c819df 5250 unsigned int offset, count, consumed;
1da177e4
LT
5251
5252next_sg:
140b5e59
TH
5253 sg = qc->cursg;
5254 if (unlikely(!sg)) {
fa2fc7f4
JB
5255 ata_ehi_push_desc(ehi, "unexpected or too much trailing data "
5256 "buf=%u cur=%u bytes=%u",
5257 qc->nbytes, qc->curbytes, bytes);
5258 return -1;
140b5e59 5259 }
1da177e4 5260
45711f1a 5261 page = sg_page(sg);
1da177e4
LT
5262 offset = sg->offset + qc->cursg_ofs;
5263
5264 /* get the current page and offset */
5265 page = nth_page(page, (offset >> PAGE_SHIFT));
5266 offset %= PAGE_SIZE;
5267
6952df03 5268 /* don't overrun current sg */
32529e01 5269 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
5270
5271 /* don't cross page boundaries */
5272 count = min(count, (unsigned int)PAGE_SIZE - offset);
5273
7282aa4b
AL
5274 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
5275
91b8b313
AL
5276 if (PageHighMem(page)) {
5277 unsigned long flags;
5278
a6b2c5d4 5279 /* FIXME: use bounce buffer */
91b8b313
AL
5280 local_irq_save(flags);
5281 buf = kmap_atomic(page, KM_IRQ0);
083958d3 5282
91b8b313 5283 /* do the actual data transfer */
56c819df 5284 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
7282aa4b 5285
91b8b313
AL
5286 kunmap_atomic(buf, KM_IRQ0);
5287 local_irq_restore(flags);
5288 } else {
5289 buf = page_address(page);
56c819df 5290 consumed = ap->ops->data_xfer(dev, buf + offset, count, rw);
91b8b313 5291 }
1da177e4 5292
56c819df 5293 bytes -= min(bytes, consumed);
1da177e4
LT
5294 qc->curbytes += count;
5295 qc->cursg_ofs += count;
5296
32529e01 5297 if (qc->cursg_ofs == sg->length) {
87260216 5298 qc->cursg = sg_next(qc->cursg);
1da177e4
LT
5299 qc->cursg_ofs = 0;
5300 }
5301
56c819df
TH
5302 /* consumed can be larger than count only for the last transfer */
5303 WARN_ON(qc->cursg && count != consumed);
5304
563a6e1f 5305 if (bytes)
1da177e4 5306 goto next_sg;
140b5e59 5307 return 0;
1da177e4
LT
5308}
5309
6ae4cfb5
AL
5310/**
5311 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5312 * @qc: Command on going
5313 *
5314 * Transfer Transfer data from/to the ATAPI device.
5315 *
5316 * LOCKING:
5317 * Inherited from caller.
6ae4cfb5
AL
5318 */
5319
1da177e4
LT
5320static void atapi_pio_bytes(struct ata_queued_cmd *qc)
5321{
5322 struct ata_port *ap = qc->ap;
5323 struct ata_device *dev = qc->dev;
56c819df 5324 struct ata_eh_info *ehi = &dev->link->eh_info;
1da177e4
LT
5325 unsigned int ireason, bc_lo, bc_hi, bytes;
5326 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
5327
eec4c3f3
AL
5328 /* Abuse qc->result_tf for temp storage of intermediate TF
5329 * here to save some kernel stack usage.
5330 * For normal completion, qc->result_tf is not relevant. For
5331 * error, qc->result_tf is later overwritten by ata_qc_complete().
5332 * So, the correctness of qc->result_tf is not affected.
5333 */
5334 ap->ops->tf_read(ap, &qc->result_tf);
5335 ireason = qc->result_tf.nsect;
5336 bc_lo = qc->result_tf.lbam;
5337 bc_hi = qc->result_tf.lbah;
1da177e4
LT
5338 bytes = (bc_hi << 8) | bc_lo;
5339
5340 /* shall be cleared to zero, indicating xfer of data */
0106372d 5341 if (unlikely(ireason & (1 << 0)))
56c819df 5342 goto atapi_check;
1da177e4
LT
5343
5344 /* make sure transfer direction matches expected */
5345 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
0106372d 5346 if (unlikely(do_write != i_write))
56c819df 5347 goto atapi_check;
0106372d
AL
5348
5349 if (unlikely(!bytes))
56c819df 5350 goto atapi_check;
1da177e4 5351
44877b4e 5352 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 5353
56c819df 5354 if (unlikely(__atapi_pio_bytes(qc, bytes)))
140b5e59 5355 goto err_out;
4cc980b3 5356 ata_altstatus(ap); /* flush */
1da177e4
LT
5357
5358 return;
5359
56c819df
TH
5360 atapi_check:
5361 ata_ehi_push_desc(ehi, "ATAPI check failed (ireason=0x%x bytes=%u)",
5362 ireason, bytes);
5363 err_out:
11a56d24 5364 qc->err_mask |= AC_ERR_HSM;
14be71f4 5365 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
5366}
5367
5368/**
c234fb00
AL
5369 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5370 * @ap: the target ata_port
5371 * @qc: qc on going
1da177e4 5372 *
c234fb00
AL
5373 * RETURNS:
5374 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5375 */
c234fb00
AL
5376
5377static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5378{
c234fb00
AL
5379 if (qc->tf.flags & ATA_TFLAG_POLLING)
5380 return 1;
1da177e4 5381
c234fb00
AL
5382 if (ap->hsm_task_state == HSM_ST_FIRST) {
5383 if (qc->tf.protocol == ATA_PROT_PIO &&
5384 (qc->tf.flags & ATA_TFLAG_WRITE))
5385 return 1;
1da177e4 5386
405e66b3 5387 if (ata_is_atapi(qc->tf.protocol) &&
c234fb00
AL
5388 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5389 return 1;
fe79e683
AL
5390 }
5391
c234fb00
AL
5392 return 0;
5393}
1da177e4 5394
c17ea20d
TH
5395/**
5396 * ata_hsm_qc_complete - finish a qc running on standard HSM
5397 * @qc: Command to complete
5398 * @in_wq: 1 if called from workqueue, 0 otherwise
5399 *
5400 * Finish @qc which is running on standard HSM.
5401 *
5402 * LOCKING:
cca3974e 5403 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
5404 * Otherwise, none on entry and grabs host lock.
5405 */
5406static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
5407{
5408 struct ata_port *ap = qc->ap;
5409 unsigned long flags;
5410
5411 if (ap->ops->error_handler) {
5412 if (in_wq) {
ba6a1308 5413 spin_lock_irqsave(ap->lock, flags);
c17ea20d 5414
cca3974e
JG
5415 /* EH might have kicked in while host lock is
5416 * released.
c17ea20d
TH
5417 */
5418 qc = ata_qc_from_tag(ap, qc->tag);
5419 if (qc) {
5420 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 5421 ap->ops->irq_on(ap);
c17ea20d
TH
5422 ata_qc_complete(qc);
5423 } else
5424 ata_port_freeze(ap);
5425 }
5426
ba6a1308 5427 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
5428 } else {
5429 if (likely(!(qc->err_mask & AC_ERR_HSM)))
5430 ata_qc_complete(qc);
5431 else
5432 ata_port_freeze(ap);
5433 }
5434 } else {
5435 if (in_wq) {
ba6a1308 5436 spin_lock_irqsave(ap->lock, flags);
83625006 5437 ap->ops->irq_on(ap);
c17ea20d 5438 ata_qc_complete(qc);
ba6a1308 5439 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
5440 } else
5441 ata_qc_complete(qc);
5442 }
5443}
5444
bb5cb290
AL
5445/**
5446 * ata_hsm_move - move the HSM to the next state.
5447 * @ap: the target ata_port
5448 * @qc: qc on going
5449 * @status: current device status
5450 * @in_wq: 1 if called from workqueue, 0 otherwise
5451 *
5452 * RETURNS:
5453 * 1 when poll next status needed, 0 otherwise.
5454 */
9a1004d0
TH
5455int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
5456 u8 status, int in_wq)
e2cec771 5457{
bb5cb290
AL
5458 unsigned long flags = 0;
5459 int poll_next;
5460
6912ccd5
AL
5461 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
5462
bb5cb290
AL
5463 /* Make sure ata_qc_issue_prot() does not throw things
5464 * like DMA polling into the workqueue. Notice that
5465 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
5466 */
c234fb00 5467 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 5468
e2cec771 5469fsm_start:
999bb6f4 5470 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 5471 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 5472
e2cec771
AL
5473 switch (ap->hsm_task_state) {
5474 case HSM_ST_FIRST:
bb5cb290
AL
5475 /* Send first data block or PACKET CDB */
5476
5477 /* If polling, we will stay in the work queue after
5478 * sending the data. Otherwise, interrupt handler
5479 * takes over after sending the data.
5480 */
5481 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
5482
e2cec771 5483 /* check device status */
3655d1d3
AL
5484 if (unlikely((status & ATA_DRQ) == 0)) {
5485 /* handle BSY=0, DRQ=0 as error */
5486 if (likely(status & (ATA_ERR | ATA_DF)))
5487 /* device stops HSM for abort/error */
5488 qc->err_mask |= AC_ERR_DEV;
5489 else
5490 /* HSM violation. Let EH handle this */
5491 qc->err_mask |= AC_ERR_HSM;
5492
14be71f4 5493 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 5494 goto fsm_start;
1da177e4
LT
5495 }
5496
71601958
AL
5497 /* Device should not ask for data transfer (DRQ=1)
5498 * when it finds something wrong.
eee6c32f
AL
5499 * We ignore DRQ here and stop the HSM by
5500 * changing hsm_task_state to HSM_ST_ERR and
5501 * let the EH abort the command or reset the device.
71601958
AL
5502 */
5503 if (unlikely(status & (ATA_ERR | ATA_DF))) {
2d3b8eea
AL
5504 /* Some ATAPI tape drives forget to clear the ERR bit
5505 * when doing the next command (mostly request sense).
5506 * We ignore ERR here to workaround and proceed sending
5507 * the CDB.
5508 */
5509 if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
5510 ata_port_printk(ap, KERN_WARNING,
5511 "DRQ=1 with device error, "
5512 "dev_stat 0x%X\n", status);
5513 qc->err_mask |= AC_ERR_HSM;
5514 ap->hsm_task_state = HSM_ST_ERR;
5515 goto fsm_start;
5516 }
71601958 5517 }
1da177e4 5518
bb5cb290
AL
5519 /* Send the CDB (atapi) or the first data block (ata pio out).
5520 * During the state transition, interrupt handler shouldn't
5521 * be invoked before the data transfer is complete and
5522 * hsm_task_state is changed. Hence, the following locking.
5523 */
5524 if (in_wq)
ba6a1308 5525 spin_lock_irqsave(ap->lock, flags);
1da177e4 5526
bb5cb290
AL
5527 if (qc->tf.protocol == ATA_PROT_PIO) {
5528 /* PIO data out protocol.
5529 * send first data block.
5530 */
0565c26d 5531
bb5cb290
AL
5532 /* ata_pio_sectors() might change the state
5533 * to HSM_ST_LAST. so, the state is changed here
5534 * before ata_pio_sectors().
5535 */
5536 ap->hsm_task_state = HSM_ST;
5537 ata_pio_sectors(qc);
bb5cb290
AL
5538 } else
5539 /* send CDB */
5540 atapi_send_cdb(ap, qc);
5541
5542 if (in_wq)
ba6a1308 5543 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
5544
5545 /* if polling, ata_pio_task() handles the rest.
5546 * otherwise, interrupt handler takes over from here.
5547 */
e2cec771 5548 break;
1c848984 5549
e2cec771
AL
5550 case HSM_ST:
5551 /* complete command or read/write the data register */
0dc36888 5552 if (qc->tf.protocol == ATAPI_PROT_PIO) {
e2cec771
AL
5553 /* ATAPI PIO protocol */
5554 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
5555 /* No more data to transfer or device error.
5556 * Device error will be tagged in HSM_ST_LAST.
5557 */
e2cec771
AL
5558 ap->hsm_task_state = HSM_ST_LAST;
5559 goto fsm_start;
5560 }
1da177e4 5561
71601958
AL
5562 /* Device should not ask for data transfer (DRQ=1)
5563 * when it finds something wrong.
eee6c32f
AL
5564 * We ignore DRQ here and stop the HSM by
5565 * changing hsm_task_state to HSM_ST_ERR and
5566 * let the EH abort the command or reset the device.
71601958
AL
5567 */
5568 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
5569 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
5570 "device error, dev_stat 0x%X\n",
5571 status);
3655d1d3 5572 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
5573 ap->hsm_task_state = HSM_ST_ERR;
5574 goto fsm_start;
71601958 5575 }
1da177e4 5576
e2cec771 5577 atapi_pio_bytes(qc);
7fb6ec28 5578
e2cec771
AL
5579 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
5580 /* bad ireason reported by device */
5581 goto fsm_start;
1da177e4 5582
e2cec771
AL
5583 } else {
5584 /* ATA PIO protocol */
5585 if (unlikely((status & ATA_DRQ) == 0)) {
5586 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
5587 if (likely(status & (ATA_ERR | ATA_DF)))
5588 /* device stops HSM for abort/error */
5589 qc->err_mask |= AC_ERR_DEV;
5590 else
55a8e2c8
TH
5591 /* HSM violation. Let EH handle this.
5592 * Phantom devices also trigger this
5593 * condition. Mark hint.
5594 */
5595 qc->err_mask |= AC_ERR_HSM |
5596 AC_ERR_NODEV_HINT;
3655d1d3 5597
e2cec771
AL
5598 ap->hsm_task_state = HSM_ST_ERR;
5599 goto fsm_start;
5600 }
1da177e4 5601
eee6c32f
AL
5602 /* For PIO reads, some devices may ask for
5603 * data transfer (DRQ=1) alone with ERR=1.
5604 * We respect DRQ here and transfer one
5605 * block of junk data before changing the
5606 * hsm_task_state to HSM_ST_ERR.
5607 *
5608 * For PIO writes, ERR=1 DRQ=1 doesn't make
5609 * sense since the data block has been
5610 * transferred to the device.
71601958
AL
5611 */
5612 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
5613 /* data might be corrputed */
5614 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
5615
5616 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
5617 ata_pio_sectors(qc);
eee6c32f
AL
5618 status = ata_wait_idle(ap);
5619 }
5620
3655d1d3
AL
5621 if (status & (ATA_BUSY | ATA_DRQ))
5622 qc->err_mask |= AC_ERR_HSM;
5623
eee6c32f
AL
5624 /* ata_pio_sectors() might change the
5625 * state to HSM_ST_LAST. so, the state
5626 * is changed after ata_pio_sectors().
5627 */
5628 ap->hsm_task_state = HSM_ST_ERR;
5629 goto fsm_start;
71601958
AL
5630 }
5631
e2cec771
AL
5632 ata_pio_sectors(qc);
5633
5634 if (ap->hsm_task_state == HSM_ST_LAST &&
5635 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
5636 /* all data read */
52a32205 5637 status = ata_wait_idle(ap);
e2cec771
AL
5638 goto fsm_start;
5639 }
5640 }
5641
bb5cb290 5642 poll_next = 1;
1da177e4
LT
5643 break;
5644
14be71f4 5645 case HSM_ST_LAST:
6912ccd5
AL
5646 if (unlikely(!ata_ok(status))) {
5647 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
5648 ap->hsm_task_state = HSM_ST_ERR;
5649 goto fsm_start;
5650 }
5651
5652 /* no more data to transfer */
4332a771 5653 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 5654 ap->print_id, qc->dev->devno, status);
e2cec771 5655
6912ccd5
AL
5656 WARN_ON(qc->err_mask);
5657
e2cec771 5658 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 5659
e2cec771 5660 /* complete taskfile transaction */
c17ea20d 5661 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5662
5663 poll_next = 0;
1da177e4
LT
5664 break;
5665
14be71f4 5666 case HSM_ST_ERR:
e2cec771
AL
5667 /* make sure qc->err_mask is available to
5668 * know what's wrong and recover
5669 */
5670 WARN_ON(qc->err_mask == 0);
5671
5672 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 5673
999bb6f4 5674 /* complete taskfile transaction */
c17ea20d 5675 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
5676
5677 poll_next = 0;
e2cec771
AL
5678 break;
5679 default:
bb5cb290 5680 poll_next = 0;
6912ccd5 5681 BUG();
1da177e4
LT
5682 }
5683
bb5cb290 5684 return poll_next;
1da177e4
LT
5685}
5686
65f27f38 5687static void ata_pio_task(struct work_struct *work)
8061f5f0 5688{
65f27f38
DH
5689 struct ata_port *ap =
5690 container_of(work, struct ata_port, port_task.work);
5691 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5692 u8 status;
a1af3734 5693 int poll_next;
8061f5f0 5694
7fb6ec28 5695fsm_start:
a1af3734 5696 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5697
a1af3734
AL
5698 /*
5699 * This is purely heuristic. This is a fast path.
5700 * Sometimes when we enter, BSY will be cleared in
5701 * a chk-status or two. If not, the drive is probably seeking
5702 * or something. Snooze for a couple msecs, then
5703 * chk-status again. If still busy, queue delayed work.
5704 */
5705 status = ata_busy_wait(ap, ATA_BUSY, 5);
5706 if (status & ATA_BUSY) {
5707 msleep(2);
5708 status = ata_busy_wait(ap, ATA_BUSY, 10);
5709 if (status & ATA_BUSY) {
442eacc3 5710 ata_pio_queue_task(ap, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5711 return;
5712 }
8061f5f0
TH
5713 }
5714
a1af3734
AL
5715 /* move the HSM */
5716 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5717
a1af3734
AL
5718 /* another command or interrupt handler
5719 * may be running at this point.
5720 */
5721 if (poll_next)
7fb6ec28 5722 goto fsm_start;
8061f5f0
TH
5723}
5724
1da177e4
LT
5725/**
5726 * ata_qc_new - Request an available ATA command, for queueing
5727 * @ap: Port associated with device @dev
5728 * @dev: Device from whom we request an available command structure
5729 *
5730 * LOCKING:
0cba632b 5731 * None.
1da177e4
LT
5732 */
5733
5734static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5735{
5736 struct ata_queued_cmd *qc = NULL;
5737 unsigned int i;
5738
e3180499 5739 /* no command while frozen */
b51e9e5d 5740 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5741 return NULL;
5742
2ab7db1f
TH
5743 /* the last tag is reserved for internal command. */
5744 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5745 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5746 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5747 break;
5748 }
5749
5750 if (qc)
5751 qc->tag = i;
5752
5753 return qc;
5754}
5755
5756/**
5757 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5758 * @dev: Device from whom we request an available command structure
5759 *
5760 * LOCKING:
0cba632b 5761 * None.
1da177e4
LT
5762 */
5763
3373efd8 5764struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5765{
9af5c9c9 5766 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5767 struct ata_queued_cmd *qc;
5768
5769 qc = ata_qc_new(ap);
5770 if (qc) {
1da177e4
LT
5771 qc->scsicmd = NULL;
5772 qc->ap = ap;
5773 qc->dev = dev;
1da177e4 5774
2c13b7ce 5775 ata_qc_reinit(qc);
1da177e4
LT
5776 }
5777
5778 return qc;
5779}
5780
1da177e4
LT
5781/**
5782 * ata_qc_free - free unused ata_queued_cmd
5783 * @qc: Command to complete
5784 *
5785 * Designed to free unused ata_queued_cmd object
5786 * in case something prevents using it.
5787 *
5788 * LOCKING:
cca3974e 5789 * spin_lock_irqsave(host lock)
1da177e4
LT
5790 */
5791void ata_qc_free(struct ata_queued_cmd *qc)
5792{
4ba946e9
TH
5793 struct ata_port *ap = qc->ap;
5794 unsigned int tag;
5795
a4631474 5796 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5797
4ba946e9
TH
5798 qc->flags = 0;
5799 tag = qc->tag;
5800 if (likely(ata_tag_valid(tag))) {
4ba946e9 5801 qc->tag = ATA_TAG_POISON;
6cec4a39 5802 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5803 }
1da177e4
LT
5804}
5805
76014427 5806void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5807{
dedaf2b0 5808 struct ata_port *ap = qc->ap;
9af5c9c9 5809 struct ata_link *link = qc->dev->link;
dedaf2b0 5810
a4631474
TH
5811 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5812 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5813
5814 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5815 ata_sg_clean(qc);
5816
7401abf2 5817 /* command should be marked inactive atomically with qc completion */
da917d69 5818 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 5819 link->sactive &= ~(1 << qc->tag);
da917d69
TH
5820 if (!link->sactive)
5821 ap->nr_active_links--;
5822 } else {
9af5c9c9 5823 link->active_tag = ATA_TAG_POISON;
da917d69
TH
5824 ap->nr_active_links--;
5825 }
5826
5827 /* clear exclusive status */
5828 if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
5829 ap->excl_link == link))
5830 ap->excl_link = NULL;
7401abf2 5831
3f3791d3
AL
5832 /* atapi: mark qc as inactive to prevent the interrupt handler
5833 * from completing the command twice later, before the error handler
5834 * is called. (when rc != 0 and atapi request sense is needed)
5835 */
5836 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5837 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5838
1da177e4 5839 /* call completion callback */
77853bf2 5840 qc->complete_fn(qc);
1da177e4
LT
5841}
5842
39599a53
TH
5843static void fill_result_tf(struct ata_queued_cmd *qc)
5844{
5845 struct ata_port *ap = qc->ap;
5846
39599a53 5847 qc->result_tf.flags = qc->tf.flags;
4742d54f 5848 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5849}
5850
00115e0f
TH
5851static void ata_verify_xfer(struct ata_queued_cmd *qc)
5852{
5853 struct ata_device *dev = qc->dev;
5854
5855 if (ata_tag_internal(qc->tag))
5856 return;
5857
5858 if (ata_is_nodata(qc->tf.protocol))
5859 return;
5860
5861 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5862 return;
5863
5864 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5865}
5866
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new EH path */
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		/* a frozen port must not see normal completions */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc;
				 * internal commands are completed normally
				 * so that their originator can examine them.
				 */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		/* a successful data command proves the transfer mode works */
		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		/* old EH path */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5953
dedaf2b0
TH
5954/**
5955 * ata_qc_complete_multiple - Complete multiple qcs successfully
5956 * @ap: port in question
5957 * @qc_active: new qc_active mask
5958 * @finish_qc: LLDD callback invoked before completing a qc
5959 *
5960 * Complete in-flight commands. This functions is meant to be
5961 * called from low-level driver's interrupt routine to complete
5962 * requests normally. ap->qc_active and @qc_active is compared
5963 * and commands are completed accordingly.
5964 *
5965 * LOCKING:
cca3974e 5966 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5967 *
5968 * RETURNS:
5969 * Number of completed commands on success, -errno otherwise.
5970 */
5971int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5972 void (*finish_qc)(struct ata_queued_cmd *))
5973{
5974 int nr_done = 0;
5975 u32 done_mask;
5976 int i;
5977
5978 done_mask = ap->qc_active ^ qc_active;
5979
5980 if (unlikely(done_mask & qc_active)) {
5981 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5982 "(%08x->%08x)\n", ap->qc_active, qc_active);
5983 return -EINVAL;
5984 }
5985
5986 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5987 struct ata_queued_cmd *qc;
5988
5989 if (!(done_mask & (1 << i)))
5990 continue;
5991
5992 if ((qc = ata_qc_from_tag(ap, i))) {
5993 if (finish_qc)
5994 finish_qc(qc);
5995 ata_qc_complete(qc);
5996 nr_done++;
5997 }
5998 }
5999
6000 return nr_done;
6001}
6002
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	On error the qc is completed with the error mask set; on
 *	success the command is handed to the LLD via ->qc_issue().
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (ata_is_ncq(prot)) {
		/* NCQ: track per-link sactive and count a link as active
		 * only on its first outstanding tag.
		 */
		WARN_ON(link->sactive & (1 << qc->tag));

		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		/* non-NCQ: exactly one active tag per link */
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* We guarantee to LLDs that they will have at least one
	 * non-zero sg if the command is a data command.
	 */
	BUG_ON(ata_is_data(prot) && (!qc->sg || !qc->n_elem || !qc->nbytes));

	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA)))
		if (ata_sg_setup(qc))
			goto sg_err;

	/* if device is sleeping, schedule softreset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
6073
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATAPI_PROT_PIO:
		case ATAPI_PROT_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATAPI_PROT_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		/* polling mode: the pio task drives the HSM to completion */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_pio_queue_task(ap, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_pio_queue_task(ap, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_pio_queue_task(ap, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATAPI_PROT_PIO:
	case ATAPI_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_pio_queue_task(ap, qc, 0);
		break;

	case ATAPI_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_pio_queue_task(ap, qc, 0);
		break;

	default:
		/* unknown protocol -- reject the command outright */
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
6205
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATAPI_PROT_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	/* advance the host state machine with the freshly read status */
	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATAPI_PROT_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
6307
6308/**
6309 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6310 * @irq: irq line (unused)
cca3974e 6311 * @dev_instance: pointer to our ata_host information structure
1da177e4 6312 *
0cba632b
JG
6313 * Default interrupt handler for PCI IDE devices. Calls
6314 * ata_host_intr() for each port that is not disabled.
6315 *
1da177e4 6316 * LOCKING:
cca3974e 6317 * Obtains host lock during operation.
1da177e4
LT
6318 *
6319 * RETURNS:
0cba632b 6320 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6321 */
6322
2dcb407e 6323irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6324{
cca3974e 6325 struct ata_host *host = dev_instance;
1da177e4
LT
6326 unsigned int i;
6327 unsigned int handled = 0;
6328 unsigned long flags;
6329
6330 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6331 spin_lock_irqsave(&host->lock, flags);
1da177e4 6332
cca3974e 6333 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6334 struct ata_port *ap;
6335
cca3974e 6336 ap = host->ports[i];
c1389503 6337 if (ap &&
029f5468 6338 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6339 struct ata_queued_cmd *qc;
6340
9af5c9c9 6341 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6342 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6343 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6344 handled |= ata_host_intr(ap, qc);
6345 }
6346 }
6347
cca3974e 6348 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6349
6350 return IRQ_RETVAL(handled);
6351}
6352
34bf2170
TH
6353/**
6354 * sata_scr_valid - test whether SCRs are accessible
936fd732 6355 * @link: ATA link to test SCR accessibility for
34bf2170 6356 *
936fd732 6357 * Test whether SCRs are accessible for @link.
34bf2170
TH
6358 *
6359 * LOCKING:
6360 * None.
6361 *
6362 * RETURNS:
6363 * 1 if SCRs are accessible, 0 otherwise.
6364 */
936fd732 6365int sata_scr_valid(struct ata_link *link)
34bf2170 6366{
936fd732
TH
6367 struct ata_port *ap = link->ap;
6368
a16abc0b 6369 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6370}
6371
6372/**
6373 * sata_scr_read - read SCR register of the specified port
936fd732 6374 * @link: ATA link to read SCR for
34bf2170
TH
6375 * @reg: SCR to read
6376 * @val: Place to store read value
6377 *
936fd732 6378 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6379 * guaranteed to succeed if @link is ap->link, the cable type of
6380 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6381 *
6382 * LOCKING:
633273a3 6383 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6384 *
6385 * RETURNS:
6386 * 0 on success, negative errno on failure.
6387 */
936fd732 6388int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6389{
633273a3
TH
6390 if (ata_is_host_link(link)) {
6391 struct ata_port *ap = link->ap;
936fd732 6392
633273a3
TH
6393 if (sata_scr_valid(link))
6394 return ap->ops->scr_read(ap, reg, val);
6395 return -EOPNOTSUPP;
6396 }
6397
6398 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6399}
6400
6401/**
6402 * sata_scr_write - write SCR register of the specified port
936fd732 6403 * @link: ATA link to write SCR for
34bf2170
TH
6404 * @reg: SCR to write
6405 * @val: value to write
6406 *
936fd732 6407 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6408 * guaranteed to succeed if @link is ap->link, the cable type of
6409 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6410 *
6411 * LOCKING:
633273a3 6412 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6413 *
6414 * RETURNS:
6415 * 0 on success, negative errno on failure.
6416 */
936fd732 6417int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6418{
633273a3
TH
6419 if (ata_is_host_link(link)) {
6420 struct ata_port *ap = link->ap;
6421
6422 if (sata_scr_valid(link))
6423 return ap->ops->scr_write(ap, reg, val);
6424 return -EOPNOTSUPP;
6425 }
936fd732 6426
633273a3 6427 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6428}
6429
6430/**
6431 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6432 * @link: ATA link to write SCR for
34bf2170
TH
6433 * @reg: SCR to write
6434 * @val: value to write
6435 *
6436 * This function is identical to sata_scr_write() except that this
6437 * function performs flush after writing to the register.
6438 *
6439 * LOCKING:
633273a3 6440 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6441 *
6442 * RETURNS:
6443 * 0 on success, negative errno on failure.
6444 */
936fd732 6445int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6446{
633273a3
TH
6447 if (ata_is_host_link(link)) {
6448 struct ata_port *ap = link->ap;
6449 int rc;
da3dbb17 6450
633273a3
TH
6451 if (sata_scr_valid(link)) {
6452 rc = ap->ops->scr_write(ap, reg, val);
6453 if (rc == 0)
6454 rc = ap->ops->scr_read(ap, reg, &val);
6455 return rc;
6456 }
6457 return -EOPNOTSUPP;
34bf2170 6458 }
633273a3
TH
6459
6460 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6461}
6462
6463/**
936fd732
TH
6464 * ata_link_online - test whether the given link is online
6465 * @link: ATA link to test
34bf2170 6466 *
936fd732
TH
6467 * Test whether @link is online. Note that this function returns
6468 * 0 if online status of @link cannot be obtained, so
6469 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6470 *
6471 * LOCKING:
6472 * None.
6473 *
6474 * RETURNS:
6475 * 1 if the port online status is available and online.
6476 */
936fd732 6477int ata_link_online(struct ata_link *link)
34bf2170
TH
6478{
6479 u32 sstatus;
6480
936fd732
TH
6481 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6482 (sstatus & 0xf) == 0x3)
34bf2170
TH
6483 return 1;
6484 return 0;
6485}
6486
6487/**
936fd732
TH
6488 * ata_link_offline - test whether the given link is offline
6489 * @link: ATA link to test
34bf2170 6490 *
936fd732
TH
6491 * Test whether @link is offline. Note that this function
6492 * returns 0 if offline status of @link cannot be obtained, so
6493 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6494 *
6495 * LOCKING:
6496 * None.
6497 *
6498 * RETURNS:
6499 * 1 if the port offline status is available and offline.
6500 */
936fd732 6501int ata_link_offline(struct ata_link *link)
34bf2170
TH
6502{
6503 u32 sstatus;
6504
936fd732
TH
6505 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6506 (sstatus & 0xf) != 0x3)
34bf2170
TH
6507 return 1;
6508 return 0;
6509}
0baab86b 6510
77b08fb5 6511int ata_flush_cache(struct ata_device *dev)
9b847548 6512{
977e6b9f 6513 unsigned int err_mask;
9b847548
JA
6514 u8 cmd;
6515
6516 if (!ata_try_flush_cache(dev))
6517 return 0;
6518
6fc49adb 6519 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6520 cmd = ATA_CMD_FLUSH_EXT;
6521 else
6522 cmd = ATA_CMD_FLUSH;
6523
4f34337b
AC
6524 /* This is wrong. On a failed flush we get back the LBA of the lost
6525 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6526 a further flush command to continue the writeback until it
4f34337b 6527 does not error */
977e6b9f
TH
6528 err_mask = ata_do_simple_cmd(dev, cmd);
6529 if (err_mask) {
6530 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6531 return -EIO;
6532 }
6533
6534 return 0;
9b847548
JA
6535}
6536
6ffa01d8 6537#ifdef CONFIG_PM
/* Request a PM operation (suspend/resume) for every port of @host and
 * hand it off to EH.  @action/@ehi_flags are ORed into each link's EH
 * info.  If @wait is set, block until EH finishes each port and return
 * the first non-zero per-port result; otherwise return 0 immediately
 * after scheduling.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH reports the result through this on-stack slot;
			 * only valid because we wait below before leaving
			 * this stack frame.
			 */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6587
6588/**
cca3974e
JG
6589 * ata_host_suspend - suspend host
6590 * @host: host to suspend
500530f6
TH
6591 * @mesg: PM message
6592 *
cca3974e 6593 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6594 * function requests EH to perform PM operations and waits for EH
6595 * to finish.
6596 *
6597 * LOCKING:
6598 * Kernel thread context (may sleep).
6599 *
6600 * RETURNS:
6601 * 0 on success, -errno on failure.
6602 */
cca3974e 6603int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6604{
9666f400 6605 int rc;
500530f6 6606
ca77329f
KCA
6607 /*
6608 * disable link pm on all ports before requesting
6609 * any pm activity
6610 */
6611 ata_lpm_enable(host);
6612
cca3974e 6613 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
72ad6ec4
JG
6614 if (rc == 0)
6615 host->dev->power.power_state = mesg;
500530f6
TH
6616 return rc;
6617}
6618
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* kick EH on every port; resume runs asynchronously */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;

	/* reenable link pm */
	ata_lpm_disable(host);
}
6ffa01d8 6639#endif
500530f6 6640
c893a3ae
RD
6641/**
6642 * ata_port_start - Set port up for dma.
6643 * @ap: Port to initialize
6644 *
6645 * Called just after data structures for each port are
6646 * initialized. Allocates space for PRD table.
6647 *
6648 * May be used as the port_start() entry in ata_port_operations.
6649 *
6650 * LOCKING:
6651 * Inherited from caller.
6652 */
f0d36efd 6653int ata_port_start(struct ata_port *ap)
1da177e4 6654{
2f1f610b 6655 struct device *dev = ap->dev;
1da177e4 6656
f0d36efd
TH
6657 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6658 GFP_KERNEL);
1da177e4
LT
6659 if (!ap->prd)
6660 return -ENOMEM;
6661
1da177e4
LT
6662 return 0;
6663}
6664
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Zero only the tail of the structure; fields before
	 * ATA_DEVICE_CLEAR_OFFSET (link, devno, the flags handled
	 * above, ...) must survive re-initialization.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* start fully permissive; probing narrows the masks later */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6699
4fb37a25
TH
6700/**
6701 * ata_link_init - Initialize an ata_link structure
6702 * @ap: ATA port link is attached to
6703 * @link: Link structure to initialize
8989805d 6704 * @pmp: Port multiplier port number
4fb37a25
TH
6705 *
6706 * Initialize @link.
6707 *
6708 * LOCKING:
6709 * Kernel thread context (may sleep)
6710 */
fb7fd614 6711void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6712{
6713 int i;
6714
6715 /* clear everything except for devices */
6716 memset(link, 0, offsetof(struct ata_link, device[0]));
6717
6718 link->ap = ap;
8989805d 6719 link->pmp = pmp;
4fb37a25
TH
6720 link->active_tag = ATA_TAG_POISON;
6721 link->hw_sata_spd_limit = UINT_MAX;
6722
6723 /* can't use iterator, ap isn't initialized yet */
6724 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6725 struct ata_device *dev = &link->device[i];
6726
6727 dev->link = link;
6728 dev->devno = dev - link->device;
6729 ata_dev_init(dev);
6730 }
6731}
6732
6733/**
6734 * sata_link_init_spd - Initialize link->sata_spd_limit
6735 * @link: Link to configure sata_spd_limit for
6736 *
6737 * Initialize @link->[hw_]sata_spd_limit to the currently
6738 * configured value.
6739 *
6740 * LOCKING:
6741 * Kernel thread context (may sleep).
6742 *
6743 * RETURNS:
6744 * 0 on success, -errno on failure.
6745 */
fb7fd614 6746int sata_link_init_spd(struct ata_link *link)
4fb37a25 6747{
33267325
TH
6748 u32 scontrol;
6749 u8 spd;
4fb37a25
TH
6750 int rc;
6751
6752 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6753 if (rc)
6754 return rc;
6755
6756 spd = (scontrol >> 4) & 0xf;
6757 if (spd)
6758 link->hw_sata_spd_limit &= (1 << spd) - 1;
6759
33267325
TH
6760 ata_force_spd_limit(link);
6761
4fb37a25
TH
6762 link->sata_spd_limit = link->hw_sata_spd_limit;
6763
6764 return 0;
6765}
6766
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port starts disabled until probing brings it up */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	/* deferred work used by the PIO/SCSI paths and EH */
	INIT_DELAYED_WORK(&ap->port_task, ata_pio_task);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* host link is PMP port 0 */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6826
f0d36efd
TH
6827static void ata_host_release(struct device *gendev, void *res)
6828{
6829 struct ata_host *host = dev_get_drvdata(gendev);
6830 int i;
6831
1aa506e4
TH
6832 for (i = 0; i < host->n_ports; i++) {
6833 struct ata_port *ap = host->ports[i];
6834
4911487a
TH
6835 if (!ap)
6836 continue;
6837
6838 if (ap->scsi_host)
1aa506e4
TH
6839 scsi_host_put(ap->scsi_host);
6840
633273a3 6841 kfree(ap->pmp_link);
4911487a 6842 kfree(ap);
1aa506e4
TH
6843 host->ports[i] = NULL;
6844 }
6845
1aa56cca 6846 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6847}
6848
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 keeps a NULL sentinel slot at the end of ->ports[]
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6913
f5cda257
TH
6914/**
6915 * ata_host_alloc_pinfo - alloc host and init with port_info array
6916 * @dev: generic device this host is associated with
6917 * @ppi: array of ATA port_info to initialize host with
6918 * @n_ports: number of ATA ports attached to this host
6919 *
6920 * Allocate ATA host and initialize with info from @ppi. If NULL
6921 * terminated, @ppi may contain fewer entries than @n_ports. The
6922 * last entry will be used for the remaining ports.
6923 *
6924 * RETURNS:
6925 * Allocate ATA host on success, NULL on failure.
6926 *
6927 * LOCKING:
6928 * Inherited from calling layer (may sleep).
6929 */
6930struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6931 const struct ata_port_info * const * ppi,
6932 int n_ports)
6933{
6934 const struct ata_port_info *pi;
6935 struct ata_host *host;
6936 int i, j;
6937
6938 host = ata_host_alloc(dev, n_ports);
6939 if (!host)
6940 return NULL;
6941
6942 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6943 struct ata_port *ap = host->ports[i];
6944
6945 if (ppi[j])
6946 pi = ppi[j++];
6947
6948 ap->pio_mask = pi->pio_mask;
6949 ap->mwdma_mask = pi->mwdma_mask;
6950 ap->udma_mask = pi->udma_mask;
6951 ap->flags |= pi->flags;
0c88758b 6952 ap->link.flags |= pi->link_flags;
f5cda257
TH
6953 ap->ops = pi->port_ops;
6954
6955 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6956 host->ops = pi->port_ops;
6957 if (!host->private_data && pi->private_data)
6958 host->private_data = pi->private_data;
6959 }
6960
6961 return host;
6962}
6963
32ebbc0c
TH
6964static void ata_host_stop(struct device *gendev, void *res)
6965{
6966 struct ata_host *host = dev_get_drvdata(gendev);
6967 int i;
6968
6969 WARN_ON(!(host->flags & ATA_HOST_STARTED));
6970
6971 for (i = 0; i < host->n_ports; i++) {
6972 struct ata_port *ap = host->ports[i];
6973
6974 if (ap->ops->port_stop)
6975 ap->ops->port_stop(ap);
6976 }
6977
6978 if (host->ops->host_stop)
6979 host->ops->host_stop(host);
6980}
6981
ecef7253
TH
6982/**
6983 * ata_host_start - start and freeze ports of an ATA host
6984 * @host: ATA host to start ports for
6985 *
6986 * Start and then freeze ports of @host. Started status is
6987 * recorded in host->flags, so this function can be called
6988 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6989 * once. If host->ops isn't initialized yet, its set to the
6990 * first non-dummy port ops.
ecef7253
TH
6991 *
6992 * LOCKING:
6993 * Inherited from calling layer (may sleep).
6994 *
6995 * RETURNS:
6996 * 0 if all ports are started successfully, -errno otherwise.
6997 */
6998int ata_host_start(struct ata_host *host)
6999{
32ebbc0c
TH
7000 int have_stop = 0;
7001 void *start_dr = NULL;
ecef7253
TH
7002 int i, rc;
7003
7004 if (host->flags & ATA_HOST_STARTED)
7005 return 0;
7006
7007 for (i = 0; i < host->n_ports; i++) {
7008 struct ata_port *ap = host->ports[i];
7009
f3187195
TH
7010 if (!host->ops && !ata_port_is_dummy(ap))
7011 host->ops = ap->ops;
7012
32ebbc0c
TH
7013 if (ap->ops->port_stop)
7014 have_stop = 1;
7015 }
7016
7017 if (host->ops->host_stop)
7018 have_stop = 1;
7019
7020 if (have_stop) {
7021 start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
7022 if (!start_dr)
7023 return -ENOMEM;
7024 }
7025
7026 for (i = 0; i < host->n_ports; i++) {
7027 struct ata_port *ap = host->ports[i];
7028
ecef7253
TH
7029 if (ap->ops->port_start) {
7030 rc = ap->ops->port_start(ap);
7031 if (rc) {
0f9fe9b7 7032 if (rc != -ENODEV)
0f757743
AM
7033 dev_printk(KERN_ERR, host->dev,
7034 "failed to start port %d "
7035 "(errno=%d)\n", i, rc);
ecef7253
TH
7036 goto err_out;
7037 }
7038 }
ecef7253
TH
7039 ata_eh_freeze_port(ap);
7040 }
7041
32ebbc0c
TH
7042 if (start_dr)
7043 devres_add(host->dev, start_dr);
ecef7253
TH
7044 host->flags |= ATA_HOST_STARTED;
7045 return 0;
7046
7047 err_out:
7048 while (--i >= 0) {
7049 struct ata_port *ap = host->ports[i];
7050
7051 if (ap->ops->port_stop)
7052 ap->ops->port_stop(ap);
7053 }
32ebbc0c 7054 devres_free(start_dr);
ecef7253
TH
7055 return rc;
7056}
7057
b03732f0 7058/**
cca3974e
JG
7059 * ata_sas_host_init - Initialize a host struct
7060 * @host: host to initialize
7061 * @dev: device host is attached to
7062 * @flags: host flags
7063 * @ops: port_ops
b03732f0
BK
7064 *
7065 * LOCKING:
7066 * PCI/etc. bus probe sem.
7067 *
7068 */
f3187195 7069/* KILLME - the only user left is ipr */
cca3974e
JG
7070void ata_host_init(struct ata_host *host, struct device *dev,
7071 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 7072{
cca3974e
JG
7073 spin_lock_init(&host->lock);
7074 host->dev = dev;
7075 host->flags = flags;
7076 host->ops = ops;
b03732f0
BK
7077}
7078
f3187195
TH
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.  Relies on host->ports[] being
	 * NULL-terminated (ata_host_alloc() allocates one extra slot).
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* probe */
		if (ap->ops->error_handler) {
			/* new-style EH: boot probing is done by kicking
			 * EH and waiting for it to finish
			 */
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style synchronous probe; failure is
			 * deliberately ignored (see FIXME below)
			 */
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
		ata_lpm_schedule(ap, ap->pm_policy);
	}

	return 0;
}
7207
f5cda257
TH
7208/**
7209 * ata_host_activate - start host, request IRQ and register it
7210 * @host: target ATA host
7211 * @irq: IRQ to request
7212 * @irq_handler: irq_handler used when requesting IRQ
7213 * @irq_flags: irq_flags used when requesting IRQ
7214 * @sht: scsi_host_template to use when registering the host
7215 *
7216 * After allocating an ATA host and initializing it, most libata
7217 * LLDs perform three steps to activate the host - start host,
7218 * request IRQ and register it. This helper takes necessasry
7219 * arguments and performs the three steps in one go.
7220 *
3d46b2e2
PM
7221 * An invalid IRQ skips the IRQ registration and expects the host to
7222 * have set polling mode on the port. In this case, @irq_handler
7223 * should be NULL.
7224 *
f5cda257
TH
7225 * LOCKING:
7226 * Inherited from calling layer (may sleep).
7227 *
7228 * RETURNS:
7229 * 0 on success, -errno otherwise.
7230 */
7231int ata_host_activate(struct ata_host *host, int irq,
7232 irq_handler_t irq_handler, unsigned long irq_flags,
7233 struct scsi_host_template *sht)
7234{
cbcdd875 7235 int i, rc;
f5cda257
TH
7236
7237 rc = ata_host_start(host);
7238 if (rc)
7239 return rc;
7240
3d46b2e2
PM
7241 /* Special case for polling mode */
7242 if (!irq) {
7243 WARN_ON(irq_handler);
7244 return ata_host_register(host, sht);
7245 }
7246
f5cda257
TH
7247 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7248 dev_driver_string(host->dev), host);
7249 if (rc)
7250 return rc;
7251
cbcdd875
TH
7252 for (i = 0; i < host->n_ports; i++)
7253 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7254
f5cda257
TH
7255 rc = ata_host_register(host, sht);
7256 /* if failed, just free the IRQ and leave ports alone */
7257 if (rc)
7258 devm_free_irq(host->dev, irq, host);
7259
7260 return rc;
7261}
7262
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without new-style EH skip straight to SCSI host removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING - EH context belongs
	 * to us.  Disable all existing devices.
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* hotplug work can no longer be scheduled once EH is quiesced */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
7313
0529c159
TH
7314/**
7315 * ata_host_detach - Detach all ports of an ATA host
7316 * @host: Host to detach
7317 *
7318 * Detach all ports of @host.
7319 *
7320 * LOCKING:
7321 * Kernel thread context (may sleep).
7322 */
7323void ata_host_detach(struct ata_host *host)
7324{
7325 int i;
7326
7327 for (i = 0; i < host->n_ports; i++)
7328 ata_port_detach(host->ports[i]);
562f0c2d
TH
7329
7330 /* the host is dead now, dissociate ACPI */
7331 ata_acpi_dissociate(host);
0529c159
TH
7332}
7333
1da177e4
LT
7334/**
7335 * ata_std_ports - initialize ioaddr with standard port offsets.
7336 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7337 *
7338 * Utility function which initializes data_addr, error_addr,
7339 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7340 * device_addr, status_addr, and command_addr to standard offsets
7341 * relative to cmd_addr.
7342 *
7343 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7344 */
0baab86b 7345
1da177e4
LT
7346void ata_std_ports(struct ata_ioports *ioaddr)
7347{
7348 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7349 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7350 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7351 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7352 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7353 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7354 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7355 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7356 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7357 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7358}
7359
0baab86b 7360
374b1873
JG
7361#ifdef CONFIG_PCI
7362
1da177e4
LT
7363/**
7364 * ata_pci_remove_one - PCI layer callback for device removal
7365 * @pdev: PCI device that was removed
7366 *
b878ca5d
TH
7367 * PCI layer indicates to libata via this hook that hot-unplug or
7368 * module unload event has occurred. Detach all ports. Resource
7369 * release is handled via devres.
1da177e4
LT
7370 *
7371 * LOCKING:
7372 * Inherited from PCI layer (may sleep).
7373 */
f0d36efd 7374void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7375{
2855568b 7376 struct device *dev = &pdev->dev;
cca3974e 7377 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7378
b878ca5d 7379 ata_host_detach(host);
1da177e4
LT
7380}
7381
7382/* move to PCI subsystem */
057ace5e 7383int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7384{
7385 unsigned long tmp = 0;
7386
7387 switch (bits->width) {
7388 case 1: {
7389 u8 tmp8 = 0;
7390 pci_read_config_byte(pdev, bits->reg, &tmp8);
7391 tmp = tmp8;
7392 break;
7393 }
7394 case 2: {
7395 u16 tmp16 = 0;
7396 pci_read_config_word(pdev, bits->reg, &tmp16);
7397 tmp = tmp16;
7398 break;
7399 }
7400 case 4: {
7401 u32 tmp32 = 0;
7402 pci_read_config_dword(pdev, bits->reg, &tmp32);
7403 tmp = tmp32;
7404 break;
7405 }
7406
7407 default:
7408 return -EINVAL;
7409 }
7410
7411 tmp &= bits->mask;
7412
7413 return (tmp == bits->val) ? 1 : 0;
7414}
9b847548 7415
6ffa01d8 7416#ifdef CONFIG_PM
3c5100c1 7417void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
7418{
7419 pci_save_state(pdev);
4c90d971 7420 pci_disable_device(pdev);
500530f6 7421
3a2d5b70 7422 if (mesg.event & PM_EVENT_SLEEP)
500530f6 7423 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
7424}
7425
553c4aa6 7426int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7427{
553c4aa6
TH
7428 int rc;
7429
9b847548
JA
7430 pci_set_power_state(pdev, PCI_D0);
7431 pci_restore_state(pdev);
553c4aa6 7432
b878ca5d 7433 rc = pcim_enable_device(pdev);
553c4aa6
TH
7434 if (rc) {
7435 dev_printk(KERN_ERR, &pdev->dev,
7436 "failed to enable device after resume (%d)\n", rc);
7437 return rc;
7438 }
7439
9b847548 7440 pci_set_master(pdev);
553c4aa6 7441 return 0;
500530f6
TH
7442}
7443
3c5100c1 7444int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7445{
cca3974e 7446 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7447 int rc = 0;
7448
cca3974e 7449 rc = ata_host_suspend(host, mesg);
500530f6
TH
7450 if (rc)
7451 return rc;
7452
3c5100c1 7453 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7454
7455 return 0;
7456}
7457
7458int ata_pci_device_resume(struct pci_dev *pdev)
7459{
cca3974e 7460 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7461 int rc;
500530f6 7462
553c4aa6
TH
7463 rc = ata_pci_device_do_resume(pdev);
7464 if (rc == 0)
7465 ata_host_resume(host);
7466 return rc;
9b847548 7467}
6ffa01d8
TH
7468#endif /* CONFIG_PM */
7469
1da177e4
LT
7470#endif /* CONFIG_PCI */
7471
33267325
TH
7472static int __init ata_parse_force_one(char **cur,
7473 struct ata_force_ent *force_ent,
7474 const char **reason)
7475{
7476 /* FIXME: Currently, there's no way to tag init const data and
7477 * using __initdata causes build failure on some versions of
7478 * gcc. Once __initdataconst is implemented, add const to the
7479 * following structure.
7480 */
7481 static struct ata_force_param force_tbl[] __initdata = {
7482 { "40c", .cbl = ATA_CBL_PATA40 },
7483 { "80c", .cbl = ATA_CBL_PATA80 },
7484 { "short40c", .cbl = ATA_CBL_PATA40_SHORT },
7485 { "unk", .cbl = ATA_CBL_PATA_UNK },
7486 { "ign", .cbl = ATA_CBL_PATA_IGN },
7487 { "sata", .cbl = ATA_CBL_SATA },
7488 { "1.5Gbps", .spd_limit = 1 },
7489 { "3.0Gbps", .spd_limit = 2 },
7490 { "noncq", .horkage_on = ATA_HORKAGE_NONCQ },
7491 { "ncq", .horkage_off = ATA_HORKAGE_NONCQ },
7492 { "pio0", .xfer_mask = 1 << (ATA_SHIFT_PIO + 0) },
7493 { "pio1", .xfer_mask = 1 << (ATA_SHIFT_PIO + 1) },
7494 { "pio2", .xfer_mask = 1 << (ATA_SHIFT_PIO + 2) },
7495 { "pio3", .xfer_mask = 1 << (ATA_SHIFT_PIO + 3) },
7496 { "pio4", .xfer_mask = 1 << (ATA_SHIFT_PIO + 4) },
7497 { "pio5", .xfer_mask = 1 << (ATA_SHIFT_PIO + 5) },
7498 { "pio6", .xfer_mask = 1 << (ATA_SHIFT_PIO + 6) },
7499 { "mwdma0", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 0) },
7500 { "mwdma1", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 1) },
7501 { "mwdma2", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 2) },
7502 { "mwdma3", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 3) },
7503 { "mwdma4", .xfer_mask = 1 << (ATA_SHIFT_MWDMA + 4) },
7504 { "udma0", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7505 { "udma16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7506 { "udma/16", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 0) },
7507 { "udma1", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7508 { "udma25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7509 { "udma/25", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 1) },
7510 { "udma2", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7511 { "udma33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7512 { "udma/33", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 2) },
7513 { "udma3", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7514 { "udma44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7515 { "udma/44", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 3) },
7516 { "udma4", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7517 { "udma66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7518 { "udma/66", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 4) },
7519 { "udma5", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7520 { "udma100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7521 { "udma/100", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 5) },
7522 { "udma6", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7523 { "udma133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7524 { "udma/133", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 6) },
7525 { "udma7", .xfer_mask = 1 << (ATA_SHIFT_UDMA + 7) },
7526 };
7527 char *start = *cur, *p = *cur;
7528 char *id, *val, *endp;
7529 const struct ata_force_param *match_fp = NULL;
7530 int nr_matches = 0, i;
7531
7532 /* find where this param ends and update *cur */
7533 while (*p != '\0' && *p != ',')
7534 p++;
7535
7536 if (*p == '\0')
7537 *cur = p;
7538 else
7539 *cur = p + 1;
7540
7541 *p = '\0';
7542
7543 /* parse */
7544 p = strchr(start, ':');
7545 if (!p) {
7546 val = strstrip(start);
7547 goto parse_val;
7548 }
7549 *p = '\0';
7550
7551 id = strstrip(start);
7552 val = strstrip(p + 1);
7553
7554 /* parse id */
7555 p = strchr(id, '.');
7556 if (p) {
7557 *p++ = '\0';
7558 force_ent->device = simple_strtoul(p, &endp, 10);
7559 if (p == endp || *endp != '\0') {
7560 *reason = "invalid device";
7561 return -EINVAL;
7562 }
7563 }
7564
7565 force_ent->port = simple_strtoul(id, &endp, 10);
7566 if (p == endp || *endp != '\0') {
7567 *reason = "invalid port/link";
7568 return -EINVAL;
7569 }
7570
7571 parse_val:
7572 /* parse val, allow shortcuts so that both 1.5 and 1.5Gbps work */
7573 for (i = 0; i < ARRAY_SIZE(force_tbl); i++) {
7574 const struct ata_force_param *fp = &force_tbl[i];
7575
7576 if (strncasecmp(val, fp->name, strlen(val)))
7577 continue;
7578
7579 nr_matches++;
7580 match_fp = fp;
7581
7582 if (strcasecmp(val, fp->name) == 0) {
7583 nr_matches = 1;
7584 break;
7585 }
7586 }
7587
7588 if (!nr_matches) {
7589 *reason = "unknown value";
7590 return -EINVAL;
7591 }
7592 if (nr_matches > 1) {
7593 *reason = "ambigious value";
7594 return -EINVAL;
7595 }
7596
7597 force_ent->param = *match_fp;
7598
7599 return 0;
7600}
7601
7602static void __init ata_parse_force_param(void)
7603{
7604 int idx = 0, size = 1;
7605 int last_port = -1, last_device = -1;
7606 char *p, *cur, *next;
7607
7608 /* calculate maximum number of params and allocate force_tbl */
7609 for (p = ata_force_param_buf; *p; p++)
7610 if (*p == ',')
7611 size++;
7612
7613 ata_force_tbl = kzalloc(sizeof(ata_force_tbl[0]) * size, GFP_KERNEL);
7614 if (!ata_force_tbl) {
7615 printk(KERN_WARNING "ata: failed to extend force table, "
7616 "libata.force ignored\n");
7617 return;
7618 }
7619
7620 /* parse and populate the table */
7621 for (cur = ata_force_param_buf; *cur != '\0'; cur = next) {
7622 const char *reason = "";
7623 struct ata_force_ent te = { .port = -1, .device = -1 };
7624
7625 next = cur;
7626 if (ata_parse_force_one(&next, &te, &reason)) {
7627 printk(KERN_WARNING "ata: failed to parse force "
7628 "parameter \"%s\" (%s)\n",
7629 cur, reason);
7630 continue;
7631 }
7632
7633 if (te.port == -1) {
7634 te.port = last_port;
7635 te.device = last_device;
7636 }
7637
7638 ata_force_tbl[idx++] = te;
7639
7640 last_port = te.port;
7641 last_device = te.device;
7642 }
7643
7644 ata_force_tbl_size = idx;
7645}
1da177e4 7646
1da177e4
LT
7647static int __init ata_init(void)
7648{
a8601e5f 7649 ata_probe_timeout *= HZ;
33267325
TH
7650
7651 ata_parse_force_param();
7652
1da177e4
LT
7653 ata_wq = create_workqueue("ata");
7654 if (!ata_wq)
7655 return -ENOMEM;
7656
453b07ac
TH
7657 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7658 if (!ata_aux_wq) {
7659 destroy_workqueue(ata_wq);
7660 return -ENOMEM;
7661 }
7662
1da177e4
LT
7663 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7664 return 0;
7665}
7666
7667static void __exit ata_exit(void)
7668{
33267325 7669 kfree(ata_force_tbl);
1da177e4 7670 destroy_workqueue(ata_wq);
453b07ac 7671 destroy_workqueue(ata_aux_wq);
1da177e4
LT
7672}
7673
a4625085 7674subsys_initcall(ata_init);
1da177e4
LT
7675module_exit(ata_exit);
7676
67846b30 7677static unsigned long ratelimit_time;
34af946a 7678static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7679
7680int ata_ratelimit(void)
7681{
7682 int rc;
7683 unsigned long flags;
7684
7685 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7686
7687 if (time_after(jiffies, ratelimit_time)) {
7688 rc = 1;
7689 ratelimit_time = jiffies + (HZ/5);
7690 } else
7691 rc = 0;
7692
7693 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7694
7695 return rc;
7696}
7697
c22daff4
TH
7698/**
7699 * ata_wait_register - wait until register value changes
7700 * @reg: IO-mapped register
7701 * @mask: Mask to apply to read register value
7702 * @val: Wait condition
7703 * @interval_msec: polling interval in milliseconds
7704 * @timeout_msec: timeout in milliseconds
7705 *
7706 * Waiting for some bits of register to change is a common
7707 * operation for ATA controllers. This function reads 32bit LE
7708 * IO-mapped register @reg and tests for the following condition.
7709 *
7710 * (*@reg & mask) != val
7711 *
7712 * If the condition is met, it returns; otherwise, the process is
7713 * repeated after @interval_msec until timeout.
7714 *
7715 * LOCKING:
7716 * Kernel thread context (may sleep)
7717 *
7718 * RETURNS:
7719 * The final register value.
7720 */
7721u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7722 unsigned long interval_msec,
7723 unsigned long timeout_msec)
7724{
7725 unsigned long timeout;
7726 u32 tmp;
7727
7728 tmp = ioread32(reg);
7729
7730 /* Calculate timeout _after_ the first read to make sure
7731 * preceding writes reach the controller before starting to
7732 * eat away the timeout.
7733 */
7734 timeout = jiffies + (timeout_msec * HZ) / 1000;
7735
7736 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7737 msleep(interval_msec);
7738 tmp = ioread32(reg);
7739 }
7740
7741 return tmp;
7742}
7743
dd5b06c4
TH
/*
 * Dummy port_ops - installed on ports that have no backing hardware.
 * Callbacks either do nothing, report success, or fail the command.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* status read on a dummy port always reports ATA_DRDY */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* any command issued to a dummy port fails with AC_ERR_SYSTEM */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* port_info wrapper so LLDs can mark unused ports via ppi[] */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7779
1da177e4
LT
7780/*
7781 * libata is essentially a library of internal helper functions for
7782 * low-level ATA host controller drivers. As such, the API/ABI is
7783 * likely to change as new drivers are added and updated.
7784 * Do not depend on ABI/API stability.
7785 */
e9c83914
TH
7786EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7787EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7788EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7789EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7790EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7791EXPORT_SYMBOL_GPL(ata_std_bios_param);
7792EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7793EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7794EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7795EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7796EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7797EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7798EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7799EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4 7800EXPORT_SYMBOL_GPL(ata_sg_init);
9a1004d0 7801EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7802EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7803EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7804EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7805EXPORT_SYMBOL_GPL(ata_tf_load);
7806EXPORT_SYMBOL_GPL(ata_tf_read);
7807EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7808EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7809EXPORT_SYMBOL_GPL(sata_print_link_status);
436d34b3 7810EXPORT_SYMBOL_GPL(atapi_cmd_type);
1da177e4
LT
7811EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7812EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
7813EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7814EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7815EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7816EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7817EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7818EXPORT_SYMBOL_GPL(ata_mode_string);
7819EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4
LT
7820EXPORT_SYMBOL_GPL(ata_check_status);
7821EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7822EXPORT_SYMBOL_GPL(ata_exec_command);
7823EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7824EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7825EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7826EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7827EXPORT_SYMBOL_GPL(ata_data_xfer);
7828EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7829EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7830EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7831EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7832EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7833EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7834EXPORT_SYMBOL_GPL(ata_bmdma_start);
7835EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7836EXPORT_SYMBOL_GPL(ata_bmdma_status);
7837EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7838EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7839EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7840EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7841EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7842EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7843EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7844EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7845EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7846EXPORT_SYMBOL_GPL(sata_link_debounce);
7847EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4 7848EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7849EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7850EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7851EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7852EXPORT_SYMBOL_GPL(sata_std_hardreset);
7853EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7854EXPORT_SYMBOL_GPL(ata_dev_classify);
7855EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7856EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7857EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7858EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7859EXPORT_SYMBOL_GPL(ata_busy_sleep);
88ff6eaf 7860EXPORT_SYMBOL_GPL(ata_wait_after_reset);
d4b2bab4 7861EXPORT_SYMBOL_GPL(ata_wait_ready);
1da177e4
LT
7862EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7863EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7864EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7865EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7866EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7867EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7868EXPORT_SYMBOL_GPL(sata_scr_valid);
7869EXPORT_SYMBOL_GPL(sata_scr_read);
7870EXPORT_SYMBOL_GPL(sata_scr_write);
7871EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7872EXPORT_SYMBOL_GPL(ata_link_online);
7873EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7874#ifdef CONFIG_PM
cca3974e
JG
7875EXPORT_SYMBOL_GPL(ata_host_suspend);
7876EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7877#endif /* CONFIG_PM */
6a62a04d
TH
7878EXPORT_SYMBOL_GPL(ata_id_string);
7879EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
7880EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7881
1bc4ccff 7882EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 7883EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
7884EXPORT_SYMBOL_GPL(ata_timing_compute);
7885EXPORT_SYMBOL_GPL(ata_timing_merge);
a0f79b92 7886EXPORT_SYMBOL_GPL(ata_timing_cycle2mode);
452503f9 7887
1da177e4
LT
7888#ifdef CONFIG_PCI
7889EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7890EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7891EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7892EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
4e6b79fa 7893EXPORT_SYMBOL_GPL(ata_pci_activate_sff_host);
1da177e4
LT
7894EXPORT_SYMBOL_GPL(ata_pci_init_one);
7895EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7896#ifdef CONFIG_PM
500530f6
TH
7897EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7898EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7899EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7900EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7901#endif /* CONFIG_PM */
67951ade
AC
7902EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7903EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7904#endif /* CONFIG_PCI */
9b847548 7905
31f88384 7906EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
3af9a77a
TH
7907EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7908EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7909EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7910EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7911
b64bbc39
TH
7912EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7913EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7914EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7915EXPORT_SYMBOL_GPL(ata_port_desc);
7916#ifdef CONFIG_PCI
7917EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7918#endif /* CONFIG_PCI */
7b70fc03 7919EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7920EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7921EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7922EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7923EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7924EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7925EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7926EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7927EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7928EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7929EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7930EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7931
7932EXPORT_SYMBOL_GPL(ata_cable_40wire);
7933EXPORT_SYMBOL_GPL(ata_cable_80wire);
7934EXPORT_SYMBOL_GPL(ata_cable_unknown);
c88f90c3 7935EXPORT_SYMBOL_GPL(ata_cable_ignore);
be0d18df 7936EXPORT_SYMBOL_GPL(ata_cable_sata);