]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
libata: skip 0xff polling for PATA controllers
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
2dcb407e 52#include <linux/io.h>
1da177e4 53#include <scsi/scsi.h>
193515d5 54#include <scsi/scsi_cmnd.h>
1da177e4
LT
55#include <scsi/scsi_host.h>
56#include <linux/libata.h>
1da177e4
LT
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
71static unsigned int ata_dev_set_feature(struct ata_device *dev,
72 u8 enable, u8 feature);
3373efd8 73static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 74static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 75
f3187195 76unsigned int ata_print_id = 1;
1da177e4
LT
77static struct workqueue_struct *ata_wq;
78
453b07ac
TH
79struct workqueue_struct *ata_aux_wq;
80
418dc1f5 81int atapi_enabled = 1;
1623c81e
JG
82module_param(atapi_enabled, int, 0444);
83MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84
95de719a
AL
85int atapi_dmadir = 0;
86module_param(atapi_dmadir, int, 0444);
87MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88
baf4fdfa
ML
89int atapi_passthru16 = 1;
90module_param(atapi_passthru16, int, 0444);
91MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
92
c3c013a2
JG
93int libata_fua = 0;
94module_param_named(fua, libata_fua, int, 0444);
95MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
96
2dcb407e 97static int ata_ignore_hpa;
1e999736
AC
98module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
99MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
100
b3a70601
AC
101static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
102module_param_named(dma, libata_dma_mask, int, 0444);
103MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
104
a8601e5f
AM
105static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
106module_param(ata_probe_timeout, int, 0444);
107MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
108
6ebe9d86 109int libata_noacpi = 0;
d7d0dad6 110module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 111MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 112
1da177e4
LT
113MODULE_AUTHOR("Jeff Garzik");
114MODULE_DESCRIPTION("Library module for ATA devices");
115MODULE_LICENSE("GPL");
116MODULE_VERSION(DRV_VERSION);
117
0baab86b 118
1da177e4
LT
119/**
120 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
121 * @tf: Taskfile to convert
1da177e4 122 * @pmp: Port multiplier port
9977126c
TH
123 * @is_cmd: This FIS is for command
124 * @fis: Buffer into which data will output
1da177e4
LT
125 *
126 * Converts a standard ATA taskfile to a Serial ATA
127 * FIS structure (Register - Host to Device).
128 *
129 * LOCKING:
130 * Inherited from caller.
131 */
9977126c 132void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 133{
9977126c
TH
134 fis[0] = 0x27; /* Register - Host to Device FIS */
135 fis[1] = pmp & 0xf; /* Port multiplier number*/
136 if (is_cmd)
137 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
138
1da177e4
LT
139 fis[2] = tf->command;
140 fis[3] = tf->feature;
141
142 fis[4] = tf->lbal;
143 fis[5] = tf->lbam;
144 fis[6] = tf->lbah;
145 fis[7] = tf->device;
146
147 fis[8] = tf->hob_lbal;
148 fis[9] = tf->hob_lbam;
149 fis[10] = tf->hob_lbah;
150 fis[11] = tf->hob_feature;
151
152 fis[12] = tf->nsect;
153 fis[13] = tf->hob_nsect;
154 fis[14] = 0;
155 fis[15] = tf->ctl;
156
157 fis[16] = 0;
158 fis[17] = 0;
159 fis[18] = 0;
160 fis[19] = 0;
161}
162
163/**
164 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
165 * @fis: Buffer from which data will be input
166 * @tf: Taskfile to output
167 *
e12a1be6 168 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
169 *
170 * LOCKING:
171 * Inherited from caller.
172 */
173
057ace5e 174void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
175{
176 tf->command = fis[2]; /* status */
177 tf->feature = fis[3]; /* error */
178
179 tf->lbal = fis[4];
180 tf->lbam = fis[5];
181 tf->lbah = fis[6];
182 tf->device = fis[7];
183
184 tf->hob_lbal = fis[8];
185 tf->hob_lbam = fis[9];
186 tf->hob_lbah = fis[10];
187
188 tf->nsect = fis[12];
189 tf->hob_nsect = fis[13];
190}
191
8cbd6df1
AL
192static const u8 ata_rw_cmds[] = {
193 /* pio multi */
194 ATA_CMD_READ_MULTI,
195 ATA_CMD_WRITE_MULTI,
196 ATA_CMD_READ_MULTI_EXT,
197 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
198 0,
199 0,
200 0,
201 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
202 /* pio */
203 ATA_CMD_PIO_READ,
204 ATA_CMD_PIO_WRITE,
205 ATA_CMD_PIO_READ_EXT,
206 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
207 0,
208 0,
209 0,
210 0,
8cbd6df1
AL
211 /* dma */
212 ATA_CMD_READ,
213 ATA_CMD_WRITE,
214 ATA_CMD_READ_EXT,
9a3dccc4
TH
215 ATA_CMD_WRITE_EXT,
216 0,
217 0,
218 0,
219 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 220};
1da177e4
LT
221
222/**
8cbd6df1 223 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
224 * @tf: command to examine and configure
225 * @dev: device tf belongs to
1da177e4 226 *
2e9edbf8 227 * Examine the device configuration and tf->flags to calculate
8cbd6df1 228 * the proper read/write commands and protocol to use.
1da177e4
LT
229 *
230 * LOCKING:
231 * caller.
232 */
bd056d7e 233static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 234{
9a3dccc4 235 u8 cmd;
1da177e4 236
9a3dccc4 237 int index, fua, lba48, write;
2e9edbf8 238
9a3dccc4 239 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
240 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
241 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 242
8cbd6df1
AL
243 if (dev->flags & ATA_DFLAG_PIO) {
244 tf->protocol = ATA_PROT_PIO;
9a3dccc4 245 index = dev->multi_count ? 0 : 8;
9af5c9c9 246 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
247 /* Unable to use DMA due to host limitation */
248 tf->protocol = ATA_PROT_PIO;
0565c26d 249 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
250 } else {
251 tf->protocol = ATA_PROT_DMA;
9a3dccc4 252 index = 16;
8cbd6df1 253 }
1da177e4 254
9a3dccc4
TH
255 cmd = ata_rw_cmds[index + fua + lba48 + write];
256 if (cmd) {
257 tf->command = cmd;
258 return 0;
259 }
260 return -1;
1da177e4
LT
261}
262
35b649fe
TH
263/**
264 * ata_tf_read_block - Read block address from ATA taskfile
265 * @tf: ATA taskfile of interest
266 * @dev: ATA device @tf belongs to
267 *
268 * LOCKING:
269 * None.
270 *
271 * Read block address from @tf. This function can handle all
272 * three address formats - LBA, LBA48 and CHS. tf->protocol and
273 * flags select the address format to use.
274 *
275 * RETURNS:
276 * Block address read from @tf.
277 */
278u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
279{
280 u64 block = 0;
281
282 if (tf->flags & ATA_TFLAG_LBA) {
283 if (tf->flags & ATA_TFLAG_LBA48) {
284 block |= (u64)tf->hob_lbah << 40;
285 block |= (u64)tf->hob_lbam << 32;
286 block |= tf->hob_lbal << 24;
287 } else
288 block |= (tf->device & 0xf) << 24;
289
290 block |= tf->lbah << 16;
291 block |= tf->lbam << 8;
292 block |= tf->lbal;
293 } else {
294 u32 cyl, head, sect;
295
296 cyl = tf->lbam | (tf->lbah << 8);
297 head = tf->device & 0xf;
298 sect = tf->lbal;
299
300 block = (cyl * dev->heads + head) * dev->sectors + sect;
301 }
302
303 return block;
304}
305
bd056d7e
TH
306/**
307 * ata_build_rw_tf - Build ATA taskfile for given read/write request
308 * @tf: Target ATA taskfile
309 * @dev: ATA device @tf belongs to
310 * @block: Block address
311 * @n_block: Number of blocks
312 * @tf_flags: RW/FUA etc...
313 * @tag: tag
314 *
315 * LOCKING:
316 * None.
317 *
318 * Build ATA taskfile @tf for read/write request described by
319 * @block, @n_block, @tf_flags and @tag on @dev.
320 *
321 * RETURNS:
322 *
323 * 0 on success, -ERANGE if the request is too large for @dev,
324 * -EINVAL if the request is invalid.
325 */
326int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
327 u64 block, u32 n_block, unsigned int tf_flags,
328 unsigned int tag)
329{
330 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
331 tf->flags |= tf_flags;
332
6d1245bf 333 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
334 /* yay, NCQ */
335 if (!lba_48_ok(block, n_block))
336 return -ERANGE;
337
338 tf->protocol = ATA_PROT_NCQ;
339 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
340
341 if (tf->flags & ATA_TFLAG_WRITE)
342 tf->command = ATA_CMD_FPDMA_WRITE;
343 else
344 tf->command = ATA_CMD_FPDMA_READ;
345
346 tf->nsect = tag << 3;
347 tf->hob_feature = (n_block >> 8) & 0xff;
348 tf->feature = n_block & 0xff;
349
350 tf->hob_lbah = (block >> 40) & 0xff;
351 tf->hob_lbam = (block >> 32) & 0xff;
352 tf->hob_lbal = (block >> 24) & 0xff;
353 tf->lbah = (block >> 16) & 0xff;
354 tf->lbam = (block >> 8) & 0xff;
355 tf->lbal = block & 0xff;
356
357 tf->device = 1 << 6;
358 if (tf->flags & ATA_TFLAG_FUA)
359 tf->device |= 1 << 7;
360 } else if (dev->flags & ATA_DFLAG_LBA) {
361 tf->flags |= ATA_TFLAG_LBA;
362
363 if (lba_28_ok(block, n_block)) {
364 /* use LBA28 */
365 tf->device |= (block >> 24) & 0xf;
366 } else if (lba_48_ok(block, n_block)) {
367 if (!(dev->flags & ATA_DFLAG_LBA48))
368 return -ERANGE;
369
370 /* use LBA48 */
371 tf->flags |= ATA_TFLAG_LBA48;
372
373 tf->hob_nsect = (n_block >> 8) & 0xff;
374
375 tf->hob_lbah = (block >> 40) & 0xff;
376 tf->hob_lbam = (block >> 32) & 0xff;
377 tf->hob_lbal = (block >> 24) & 0xff;
378 } else
379 /* request too large even for LBA48 */
380 return -ERANGE;
381
382 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
383 return -EINVAL;
384
385 tf->nsect = n_block & 0xff;
386
387 tf->lbah = (block >> 16) & 0xff;
388 tf->lbam = (block >> 8) & 0xff;
389 tf->lbal = block & 0xff;
390
391 tf->device |= ATA_LBA;
392 } else {
393 /* CHS */
394 u32 sect, head, cyl, track;
395
396 /* The request -may- be too large for CHS addressing. */
397 if (!lba_28_ok(block, n_block))
398 return -ERANGE;
399
400 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
401 return -EINVAL;
402
403 /* Convert LBA to CHS */
404 track = (u32)block / dev->sectors;
405 cyl = track / dev->heads;
406 head = track % dev->heads;
407 sect = (u32)block % dev->sectors + 1;
408
409 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
410 (u32)block, track, cyl, head, sect);
411
412 /* Check whether the converted CHS can fit.
413 Cylinder: 0-65535
414 Head: 0-15
415 Sector: 1-255*/
416 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
417 return -ERANGE;
418
419 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
420 tf->lbal = sect;
421 tf->lbam = cyl;
422 tf->lbah = cyl >> 8;
423 tf->device |= head;
424 }
425
426 return 0;
427}
428
cb95d562
TH
429/**
430 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
431 * @pio_mask: pio_mask
432 * @mwdma_mask: mwdma_mask
433 * @udma_mask: udma_mask
434 *
435 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
436 * unsigned int xfer_mask.
437 *
438 * LOCKING:
439 * None.
440 *
441 * RETURNS:
442 * Packed xfer_mask.
443 */
444static unsigned int ata_pack_xfermask(unsigned int pio_mask,
445 unsigned int mwdma_mask,
446 unsigned int udma_mask)
447{
448 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
449 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
450 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
451}
452
c0489e4e
TH
453/**
454 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
455 * @xfer_mask: xfer_mask to unpack
456 * @pio_mask: resulting pio_mask
457 * @mwdma_mask: resulting mwdma_mask
458 * @udma_mask: resulting udma_mask
459 *
460 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
461 * Any NULL distination masks will be ignored.
462 */
463static void ata_unpack_xfermask(unsigned int xfer_mask,
464 unsigned int *pio_mask,
465 unsigned int *mwdma_mask,
466 unsigned int *udma_mask)
467{
468 if (pio_mask)
469 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
470 if (mwdma_mask)
471 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
472 if (udma_mask)
473 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
474}
475
cb95d562 476static const struct ata_xfer_ent {
be9a50c8 477 int shift, bits;
cb95d562
TH
478 u8 base;
479} ata_xfer_tbl[] = {
480 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
481 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
482 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
483 { -1, },
484};
485
486/**
487 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
488 * @xfer_mask: xfer_mask of interest
489 *
490 * Return matching XFER_* value for @xfer_mask. Only the highest
491 * bit of @xfer_mask is considered.
492 *
493 * LOCKING:
494 * None.
495 *
496 * RETURNS:
497 * Matching XFER_* value, 0 if no match found.
498 */
499static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
500{
501 int highbit = fls(xfer_mask) - 1;
502 const struct ata_xfer_ent *ent;
503
504 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
505 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
506 return ent->base + highbit - ent->shift;
507 return 0;
508}
509
510/**
511 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
512 * @xfer_mode: XFER_* of interest
513 *
514 * Return matching xfer_mask for @xfer_mode.
515 *
516 * LOCKING:
517 * None.
518 *
519 * RETURNS:
520 * Matching xfer_mask, 0 if no match found.
521 */
522static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
523{
524 const struct ata_xfer_ent *ent;
525
526 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
527 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
528 return 1 << (ent->shift + xfer_mode - ent->base);
529 return 0;
530}
531
532/**
533 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
534 * @xfer_mode: XFER_* of interest
535 *
536 * Return matching xfer_shift for @xfer_mode.
537 *
538 * LOCKING:
539 * None.
540 *
541 * RETURNS:
542 * Matching xfer_shift, -1 if no match found.
543 */
544static int ata_xfer_mode2shift(unsigned int xfer_mode)
545{
546 const struct ata_xfer_ent *ent;
547
548 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
549 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
550 return ent->shift;
551 return -1;
552}
553
1da177e4 554/**
1da7b0d0
TH
555 * ata_mode_string - convert xfer_mask to string
556 * @xfer_mask: mask of bits supported; only highest bit counts.
1da177e4
LT
557 *
558 * Determine string which represents the highest speed
1da7b0d0 559 * (highest bit in @modemask).
1da177e4
LT
560 *
561 * LOCKING:
562 * None.
563 *
564 * RETURNS:
565 * Constant C string representing highest speed listed in
1da7b0d0 566 * @mode_mask, or the constant C string "<n/a>".
1da177e4 567 */
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position in xfer_mask */
	static const char * const xfer_mode_str[] = {
		"PIO0", "PIO1", "PIO2", "PIO3", "PIO4", "PIO5", "PIO6",
		"MWDMA0", "MWDMA1", "MWDMA2", "MWDMA3", "MWDMA4",
		"UDMA/16", "UDMA/25", "UDMA/33", "UDMA/44",
		"UDMA/66", "UDMA/100", "UDMA/133", "UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
599
4c360c81
TH
/*
 * sata_spd_string - return human-readable name for a SATA link speed.
 * @spd is the 1-based speed value from SStatus/SControl (1 = Gen1,
 * 2 = Gen2); anything out of range yields "<unknown>".
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int idx = spd - 1;	/* wraps to huge value for spd == 0 */

	if (spd == 0 || idx >= ARRAY_SIZE(spd_str))
		return "<unknown>";
	return spd_str[idx];
}
611
3373efd8 612void ata_dev_disable(struct ata_device *dev)
0b8efb0a 613{
09d7f9b0 614 if (ata_dev_enabled(dev)) {
9af5c9c9 615 if (ata_msg_drv(dev->link->ap))
09d7f9b0 616 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
617 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
618 ATA_DNXFER_QUIET);
0b8efb0a
TH
619 dev->class++;
620 }
621}
622
ca77329f
KCA
623static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
624{
625 struct ata_link *link = dev->link;
626 struct ata_port *ap = link->ap;
627 u32 scontrol;
628 unsigned int err_mask;
629 int rc;
630
631 /*
632 * disallow DIPM for drivers which haven't set
633 * ATA_FLAG_IPM. This is because when DIPM is enabled,
634 * phy ready will be set in the interrupt status on
635 * state changes, which will cause some drivers to
636 * think there are errors - additionally drivers will
637 * need to disable hot plug.
638 */
639 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
640 ap->pm_policy = NOT_AVAILABLE;
641 return -EINVAL;
642 }
643
644 /*
645 * For DIPM, we will only enable it for the
646 * min_power setting.
647 *
648 * Why? Because Disks are too stupid to know that
649 * If the host rejects a request to go to SLUMBER
650 * they should retry at PARTIAL, and instead it
651 * just would give up. So, for medium_power to
652 * work at all, we need to only allow HIPM.
653 */
654 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
655 if (rc)
656 return rc;
657
658 switch (policy) {
659 case MIN_POWER:
660 /* no restrictions on IPM transitions */
661 scontrol &= ~(0x3 << 8);
662 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
663 if (rc)
664 return rc;
665
666 /* enable DIPM */
667 if (dev->flags & ATA_DFLAG_DIPM)
668 err_mask = ata_dev_set_feature(dev,
669 SETFEATURES_SATA_ENABLE, SATA_DIPM);
670 break;
671 case MEDIUM_POWER:
672 /* allow IPM to PARTIAL */
673 scontrol &= ~(0x1 << 8);
674 scontrol |= (0x2 << 8);
675 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
676 if (rc)
677 return rc;
678
f5456b63
KCA
679 /*
680 * we don't have to disable DIPM since IPM flags
681 * disallow transitions to SLUMBER, which effectively
682 * disable DIPM if it does not support PARTIAL
683 */
ca77329f
KCA
684 break;
685 case NOT_AVAILABLE:
686 case MAX_PERFORMANCE:
687 /* disable all IPM transitions */
688 scontrol |= (0x3 << 8);
689 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
690 if (rc)
691 return rc;
692
f5456b63
KCA
693 /*
694 * we don't have to disable DIPM since IPM flags
695 * disallow all transitions which effectively
696 * disable DIPM anyway.
697 */
ca77329f
KCA
698 break;
699 }
700
701 /* FIXME: handle SET FEATURES failure */
702 (void) err_mask;
703
704 return 0;
705}
706
707/**
708 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
709 * @dev: device to enable power management
710 * @policy: the link power management policy
ca77329f
KCA
711 *
712 * Enable SATA Interface power management. This will enable
713 * Device Interface Power Management (DIPM) for min_power
714 * policy, and then call driver specific callbacks for
715 * enabling Host Initiated Power management.
716 *
717 * Locking: Caller.
718 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
719 */
720void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
721{
722 int rc = 0;
723 struct ata_port *ap = dev->link->ap;
724
725 /* set HIPM first, then DIPM */
726 if (ap->ops->enable_pm)
727 rc = ap->ops->enable_pm(ap, policy);
728 if (rc)
729 goto enable_pm_out;
730 rc = ata_dev_set_dipm(dev, policy);
731
732enable_pm_out:
733 if (rc)
734 ap->pm_policy = MAX_PERFORMANCE;
735 else
736 ap->pm_policy = policy;
737 return /* rc */; /* hopefully we can use 'rc' eventually */
738}
739
1992a5ed 740#ifdef CONFIG_PM
ca77329f
KCA
741/**
742 * ata_dev_disable_pm - disable SATA interface power management
48166fd9 743 * @dev: device to disable power management
ca77329f
KCA
744 *
745 * Disable SATA Interface power management. This will disable
746 * Device Interface Power Management (DIPM) without changing
747 * policy, call driver specific callbacks for disabling Host
748 * Initiated Power management.
749 *
750 * Locking: Caller.
751 * Returns: void
752 */
753static void ata_dev_disable_pm(struct ata_device *dev)
754{
755 struct ata_port *ap = dev->link->ap;
756
757 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
758 if (ap->ops->disable_pm)
759 ap->ops->disable_pm(ap);
760}
1992a5ed 761#endif /* CONFIG_PM */
ca77329f
KCA
762
763void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
764{
765 ap->pm_policy = policy;
766 ap->link.eh_info.action |= ATA_EHI_LPM;
767 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
768 ata_port_schedule_eh(ap);
769}
770
1992a5ed 771#ifdef CONFIG_PM
ca77329f
KCA
772static void ata_lpm_enable(struct ata_host *host)
773{
774 struct ata_link *link;
775 struct ata_port *ap;
776 struct ata_device *dev;
777 int i;
778
779 for (i = 0; i < host->n_ports; i++) {
780 ap = host->ports[i];
781 ata_port_for_each_link(link, ap) {
782 ata_link_for_each_dev(dev, link)
783 ata_dev_disable_pm(dev);
784 }
785 }
786}
787
788static void ata_lpm_disable(struct ata_host *host)
789{
790 int i;
791
792 for (i = 0; i < host->n_ports; i++) {
793 struct ata_port *ap = host->ports[i];
794 ata_lpm_schedule(ap, ap->pm_policy);
795 }
796}
1992a5ed 797#endif /* CONFIG_PM */
ca77329f
KCA
798
799
1da177e4 800/**
0d5ff566 801 * ata_devchk - PATA device presence detection
1da177e4
LT
802 * @ap: ATA channel to examine
803 * @device: Device to examine (starting at zero)
804 *
805 * This technique was originally described in
806 * Hale Landis's ATADRVR (www.ata-atapi.com), and
807 * later found its way into the ATA/ATAPI spec.
808 *
809 * Write a pattern to the ATA shadow registers,
810 * and if a device is present, it will respond by
811 * correctly storing and echoing back the
812 * ATA shadow register contents.
813 *
814 * LOCKING:
815 * caller.
816 */
817
0d5ff566 818static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
819{
820 struct ata_ioports *ioaddr = &ap->ioaddr;
821 u8 nsect, lbal;
822
823 ap->ops->dev_select(ap, device);
824
0d5ff566
TH
825 iowrite8(0x55, ioaddr->nsect_addr);
826 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 827
0d5ff566
TH
828 iowrite8(0xaa, ioaddr->nsect_addr);
829 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 830
0d5ff566
TH
831 iowrite8(0x55, ioaddr->nsect_addr);
832 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 833
0d5ff566
TH
834 nsect = ioread8(ioaddr->nsect_addr);
835 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
836
837 if ((nsect == 0x55) && (lbal == 0xaa))
838 return 1; /* we found a device */
839
840 return 0; /* nothing found */
841}
842
1da177e4
LT
843/**
844 * ata_dev_classify - determine device type based on ATA-spec signature
845 * @tf: ATA taskfile register set for device to be identified
846 *
847 * Determine from taskfile register contents whether a device is
848 * ATA or ATAPI, as per "Signature and persistence" section
849 * of ATA/PI spec (volume 1, sect 5.14).
850 *
851 * LOCKING:
852 * None.
853 *
854 * RETURNS:
633273a3
TH
855 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
856 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 857 */
057ace5e 858unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
859{
860 /* Apple's open source Darwin code hints that some devices only
861 * put a proper signature into the LBA mid/high registers,
862 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
863 *
864 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
865 * signatures for ATA and ATAPI devices attached on SerialATA,
866 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
867 * spec has never mentioned about using different signatures
868 * for ATA/ATAPI devices. Then, Serial ATA II: Port
869 * Multiplier specification began to use 0x69/0x96 to identify
870 * port multpliers and 0x3c/0xc3 to identify SEMB device.
871 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
872 * 0x69/0x96 shortly and described them as reserved for
873 * SerialATA.
874 *
875 * We follow the current spec and consider that 0x69/0x96
876 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 877 */
633273a3 878 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
879 DPRINTK("found ATA device by sig\n");
880 return ATA_DEV_ATA;
881 }
882
633273a3 883 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
884 DPRINTK("found ATAPI device by sig\n");
885 return ATA_DEV_ATAPI;
886 }
887
633273a3
TH
888 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
889 DPRINTK("found PMP device by sig\n");
890 return ATA_DEV_PMP;
891 }
892
893 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 894 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
895 return ATA_DEV_SEMB_UNSUP; /* not yet */
896 }
897
1da177e4
LT
898 DPRINTK("unknown device\n");
899 return ATA_DEV_UNKNOWN;
900}
901
902/**
903 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
904 * @dev: ATA device to classify (starting at zero)
905 * @present: device seems present
b4dc7623 906 * @r_err: Value of error register on completion
1da177e4
LT
907 *
908 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
909 * an ATA/ATAPI-defined set of values is placed in the ATA
910 * shadow registers, indicating the results of device detection
911 * and diagnostics.
912 *
913 * Select the ATA device, and read the values from the ATA shadow
914 * registers. Then parse according to the Error register value,
915 * and the spec-defined values examined by ata_dev_classify().
916 *
917 * LOCKING:
918 * caller.
b4dc7623
TH
919 *
920 * RETURNS:
921 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 922 */
3f19859e
TH
923unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
924 u8 *r_err)
1da177e4 925{
3f19859e 926 struct ata_port *ap = dev->link->ap;
1da177e4
LT
927 struct ata_taskfile tf;
928 unsigned int class;
929 u8 err;
930
3f19859e 931 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
932
933 memset(&tf, 0, sizeof(tf));
934
1da177e4 935 ap->ops->tf_read(ap, &tf);
0169e284 936 err = tf.feature;
b4dc7623
TH
937 if (r_err)
938 *r_err = err;
1da177e4 939
93590859 940 /* see if device passed diags: if master then continue and warn later */
3f19859e 941 if (err == 0 && dev->devno == 0)
93590859 942 /* diagnostic fail : do nothing _YET_ */
3f19859e 943 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 944 else if (err == 1)
1da177e4 945 /* do nothing */ ;
3f19859e 946 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
947 /* do nothing */ ;
948 else
b4dc7623 949 return ATA_DEV_NONE;
1da177e4 950
b4dc7623 951 /* determine if device is ATA or ATAPI */
1da177e4 952 class = ata_dev_classify(&tf);
b4dc7623 953
d7fbee05
TH
954 if (class == ATA_DEV_UNKNOWN) {
955 /* If the device failed diagnostic, it's likely to
956 * have reported incorrect device signature too.
957 * Assume ATA device if the device seems present but
958 * device signature is invalid with diagnostic
959 * failure.
960 */
961 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
962 class = ATA_DEV_ATA;
963 else
964 class = ATA_DEV_NONE;
965 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
966 class = ATA_DEV_NONE;
967
b4dc7623 968 return class;
1da177e4
LT
969}
970
971/**
6a62a04d 972 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
973 * @id: IDENTIFY DEVICE results we will examine
974 * @s: string into which data is output
975 * @ofs: offset into identify device page
976 * @len: length of string to return. must be an even number.
977 *
978 * The strings in the IDENTIFY DEVICE page are broken up into
979 * 16-bit chunks. Run through the string, and output each
980 * 8-bit chunk linearly, regardless of platform.
981 *
982 * LOCKING:
983 * caller.
984 */
985
6a62a04d
TH
986void ata_id_string(const u16 *id, unsigned char *s,
987 unsigned int ofs, unsigned int len)
1da177e4
LT
988{
989 unsigned int c;
990
991 while (len > 0) {
992 c = id[ofs] >> 8;
993 *s = c;
994 s++;
995
996 c = id[ofs] & 0xff;
997 *s = c;
998 s++;
999
1000 ofs++;
1001 len -= 2;
1002 }
1003}
1004
0e949ff3 1005/**
6a62a04d 1006 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1007 * @id: IDENTIFY DEVICE results we will examine
1008 * @s: string into which data is output
1009 * @ofs: offset into identify device page
1010 * @len: length of string to return. must be an odd number.
1011 *
6a62a04d 1012 * This function is identical to ata_id_string except that it
0e949ff3
TH
1013 * trims trailing spaces and terminates the resulting string with
1014 * null. @len must be actual maximum length (even number) + 1.
1015 *
1016 * LOCKING:
1017 * caller.
1018 */
6a62a04d
TH
1019void ata_id_c_string(const u16 *id, unsigned char *s,
1020 unsigned int ofs, unsigned int len)
0e949ff3
TH
1021{
1022 unsigned char *p;
1023
1024 WARN_ON(!(len & 1));
1025
6a62a04d 1026 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1027
1028 p = s + strnlen(s, len - 1);
1029 while (p > s && p[-1] == ' ')
1030 p--;
1031 *p = '\0';
1032}
0baab86b 1033
db6f8759
TH
1034static u64 ata_id_n_sectors(const u16 *id)
1035{
1036 if (ata_id_has_lba(id)) {
1037 if (ata_id_has_lba48(id))
1038 return ata_id_u64(id, 100);
1039 else
1040 return ata_id_u32(id, 60);
1041 } else {
1042 if (ata_id_current_chs_valid(id))
1043 return ata_id_u32(id, 57);
1044 else
1045 return id[1] * id[3] * id[6];
1046 }
1047}
1048
1e999736
AC
1049static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1050{
1051 u64 sectors = 0;
1052
1053 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1054 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1055 sectors |= (tf->hob_lbal & 0xff) << 24;
1056 sectors |= (tf->lbah & 0xff) << 16;
1057 sectors |= (tf->lbam & 0xff) << 8;
1058 sectors |= (tf->lbal & 0xff);
1059
1060 return ++sectors;
1061}
1062
1063static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1064{
1065 u64 sectors = 0;
1066
1067 sectors |= (tf->device & 0x0f) << 24;
1068 sectors |= (tf->lbah & 0xff) << 16;
1069 sectors |= (tf->lbam & 0xff) << 8;
1070 sectors |= (tf->lbal & 0xff);
1071
1072 return ++sectors;
1073}
1074
1075/**
c728a914
TH
1076 * ata_read_native_max_address - Read native max address
1077 * @dev: target device
1078 * @max_sectors: out parameter for the result native max address
1e999736 1079 *
c728a914
TH
1080 * Perform an LBA48 or LBA28 native size query upon the device in
1081 * question.
1e999736 1082 *
c728a914
TH
1083 * RETURNS:
1084 * 0 on success, -EACCES if command is aborted by the drive.
1085 * -EIO on other errors.
1e999736 1086 */
c728a914 1087static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1088{
c728a914 1089 unsigned int err_mask;
1e999736 1090 struct ata_taskfile tf;
c728a914 1091 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1092
1093 ata_tf_init(dev, &tf);
1094
c728a914 1095 /* always clear all address registers */
1e999736 1096 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1097
c728a914
TH
1098 if (lba48) {
1099 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1100 tf.flags |= ATA_TFLAG_LBA48;
1101 } else
1102 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1103
1e999736 1104 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1105 tf.device |= ATA_LBA;
1106
2b789108 1107 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1108 if (err_mask) {
1109 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1110 "max address (err_mask=0x%x)\n", err_mask);
1111 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1112 return -EACCES;
1113 return -EIO;
1114 }
1e999736 1115
c728a914
TH
1116 if (lba48)
1117 *max_sectors = ata_tf_to_lba48(&tf);
1118 else
1119 *max_sectors = ata_tf_to_lba(&tf);
2dcb407e 1120 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1121 (*max_sectors)--;
c728a914 1122 return 0;
1e999736
AC
1123}
1124
1125/**
c728a914
TH
1126 * ata_set_max_sectors - Set max sectors
1127 * @dev: target device
6b38d1d1 1128 * @new_sectors: new max sectors value to set for the device
1e999736 1129 *
c728a914
TH
1130 * Set max sectors of @dev to @new_sectors.
1131 *
1132 * RETURNS:
1133 * 0 on success, -EACCES if command is aborted or denied (due to
1134 * previous non-volatile SET_MAX) by the drive. -EIO on other
1135 * errors.
1e999736 1136 */
05027adc 1137static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1138{
c728a914 1139 unsigned int err_mask;
1e999736 1140 struct ata_taskfile tf;
c728a914 1141 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1142
1143 new_sectors--;
1144
1145 ata_tf_init(dev, &tf);
1146
1e999736 1147 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1148
1149 if (lba48) {
1150 tf.command = ATA_CMD_SET_MAX_EXT;
1151 tf.flags |= ATA_TFLAG_LBA48;
1152
1153 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1154 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1155 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1156 } else {
c728a914
TH
1157 tf.command = ATA_CMD_SET_MAX;
1158
1e582ba4
TH
1159 tf.device |= (new_sectors >> 24) & 0xf;
1160 }
1161
1e999736 1162 tf.protocol |= ATA_PROT_NODATA;
c728a914 1163 tf.device |= ATA_LBA;
1e999736
AC
1164
1165 tf.lbal = (new_sectors >> 0) & 0xff;
1166 tf.lbam = (new_sectors >> 8) & 0xff;
1167 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1168
2b789108 1169 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1170 if (err_mask) {
1171 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1172 "max address (err_mask=0x%x)\n", err_mask);
1173 if (err_mask == AC_ERR_DEV &&
1174 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1175 return -EACCES;
1176 return -EIO;
1177 }
1178
c728a914 1179 return 0;
1e999736
AC
1180}
1181
1182/**
1183 * ata_hpa_resize - Resize a device with an HPA set
1184 * @dev: Device to resize
1185 *
1186 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1187 * it if required to the full size of the media. The caller must check
1188 * the drive has the HPA feature set enabled.
05027adc
TH
1189 *
1190 * RETURNS:
1191 * 0 on success, -errno on failure.
1e999736 1192 */
05027adc 1193static int ata_hpa_resize(struct ata_device *dev)
1e999736 1194{
05027adc
TH
1195 struct ata_eh_context *ehc = &dev->link->eh_context;
1196 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1197 u64 sectors = ata_id_n_sectors(dev->id);
1198 u64 native_sectors;
c728a914 1199 int rc;
a617c09f 1200
05027adc
TH
1201 /* do we need to do it? */
1202 if (dev->class != ATA_DEV_ATA ||
1203 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1204 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1205 return 0;
1e999736 1206
05027adc
TH
1207 /* read native max address */
1208 rc = ata_read_native_max_address(dev, &native_sectors);
1209 if (rc) {
1210 /* If HPA isn't going to be unlocked, skip HPA
1211 * resizing from the next try.
1212 */
1213 if (!ata_ignore_hpa) {
1214 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1215 "broken, will skip HPA handling\n");
1216 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1217
1218 /* we can continue if device aborted the command */
1219 if (rc == -EACCES)
1220 rc = 0;
1e999736 1221 }
37301a55 1222
05027adc
TH
1223 return rc;
1224 }
1225
1226 /* nothing to do? */
1227 if (native_sectors <= sectors || !ata_ignore_hpa) {
1228 if (!print_info || native_sectors == sectors)
1229 return 0;
1230
1231 if (native_sectors > sectors)
1232 ata_dev_printk(dev, KERN_INFO,
1233 "HPA detected: current %llu, native %llu\n",
1234 (unsigned long long)sectors,
1235 (unsigned long long)native_sectors);
1236 else if (native_sectors < sectors)
1237 ata_dev_printk(dev, KERN_WARNING,
1238 "native sectors (%llu) is smaller than "
1239 "sectors (%llu)\n",
1240 (unsigned long long)native_sectors,
1241 (unsigned long long)sectors);
1242 return 0;
1243 }
1244
1245 /* let's unlock HPA */
1246 rc = ata_set_max_sectors(dev, native_sectors);
1247 if (rc == -EACCES) {
1248 /* if device aborted the command, skip HPA resizing */
1249 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1250 "(%llu -> %llu), skipping HPA handling\n",
1251 (unsigned long long)sectors,
1252 (unsigned long long)native_sectors);
1253 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1254 return 0;
1255 } else if (rc)
1256 return rc;
1257
1258 /* re-read IDENTIFY data */
1259 rc = ata_dev_reread_id(dev, 0);
1260 if (rc) {
1261 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1262 "data after HPA resizing\n");
1263 return rc;
1264 }
1265
1266 if (print_info) {
1267 u64 new_sectors = ata_id_n_sectors(dev->id);
1268 ata_dev_printk(dev, KERN_INFO,
1269 "HPA unlocked: %llu -> %llu, native %llu\n",
1270 (unsigned long long)sectors,
1271 (unsigned long long)new_sectors,
1272 (unsigned long long)native_sectors);
1273 }
1274
1275 return 0;
1e999736
AC
1276}
1277
10305f0f
AC
1278/**
1279 * ata_id_to_dma_mode - Identify DMA mode from id block
1280 * @dev: device to identify
cc261267 1281 * @unknown: mode to assume if we cannot tell
10305f0f
AC
1282 *
1283 * Set up the timing values for the device based upon the identify
1284 * reported values for the DMA mode. This function is used by drivers
1285 * which rely upon firmware configured modes, but wish to report the
1286 * mode correctly when possible.
1287 *
1288 * In addition we emit similarly formatted messages to the default
1289 * ata_dev_set_mode handler, in order to provide consistency of
1290 * presentation.
1291 */
1292
1293void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1294{
1295 unsigned int mask;
1296 u8 mode;
1297
1298 /* Pack the DMA modes */
1299 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1300 if (dev->id[53] & 0x04)
1301 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1302
1303 /* Select the mode in use */
1304 mode = ata_xfer_mask2mode(mask);
1305
1306 if (mode != 0) {
1307 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1308 ata_mode_string(mask));
1309 } else {
1310 /* SWDMA perhaps ? */
1311 mode = unknown;
1312 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1313 }
1314
1315 /* Configure the device reporting */
1316 dev->xfer_mode = mode;
1317 dev->xfer_shift = ata_xfer_mode2shift(mode);
1318}
1319
0baab86b
EF
1320/**
1321 * ata_noop_dev_select - Select device 0/1 on ATA bus
1322 * @ap: ATA channel to manipulate
1323 * @device: ATA device (numbered from zero) to select
1324 *
1325 * This function performs no actual function.
1326 *
1327 * May be used as the dev_select() entry in ata_port_operations.
1328 *
1329 * LOCKING:
1330 * caller.
1331 */
2dcb407e 1332void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1333{
1334}
1335
0baab86b 1336
1da177e4
LT
1337/**
1338 * ata_std_dev_select - Select device 0/1 on ATA bus
1339 * @ap: ATA channel to manipulate
1340 * @device: ATA device (numbered from zero) to select
1341 *
1342 * Use the method defined in the ATA specification to
1343 * make either device 0, or device 1, active on the
0baab86b
EF
1344 * ATA channel. Works with both PIO and MMIO.
1345 *
1346 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1347 *
1348 * LOCKING:
1349 * caller.
1350 */
1351
2dcb407e 1352void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1353{
1354 u8 tmp;
1355
1356 if (device == 0)
1357 tmp = ATA_DEVICE_OBS;
1358 else
1359 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1360
0d5ff566 1361 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1362 ata_pause(ap); /* needed; also flushes, for mmio */
1363}
1364
1365/**
1366 * ata_dev_select - Select device 0/1 on ATA bus
1367 * @ap: ATA channel to manipulate
1368 * @device: ATA device (numbered from zero) to select
1369 * @wait: non-zero to wait for Status register BSY bit to clear
1370 * @can_sleep: non-zero if context allows sleeping
1371 *
1372 * Use the method defined in the ATA specification to
1373 * make either device 0, or device 1, active on the
1374 * ATA channel.
1375 *
1376 * This is a high-level version of ata_std_dev_select(),
1377 * which additionally provides the services of inserting
1378 * the proper pauses and status polling, where needed.
1379 *
1380 * LOCKING:
1381 * caller.
1382 */
1383
1384void ata_dev_select(struct ata_port *ap, unsigned int device,
1385 unsigned int wait, unsigned int can_sleep)
1386{
88574551 1387 if (ata_msg_probe(ap))
44877b4e
TH
1388 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1389 "device %u, wait %u\n", device, wait);
1da177e4
LT
1390
1391 if (wait)
1392 ata_wait_idle(ap);
1393
1394 ap->ops->dev_select(ap, device);
1395
1396 if (wait) {
9af5c9c9 1397 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1398 msleep(150);
1399 ata_wait_idle(ap);
1400 }
1401}
1402
1403/**
1404 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1405 * @id: IDENTIFY DEVICE page to dump
1da177e4 1406 *
0bd3300a
TH
1407 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1408 * page.
1da177e4
LT
1409 *
1410 * LOCKING:
1411 * caller.
1412 */
1413
0bd3300a 1414static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1415{
1416 DPRINTK("49==0x%04x "
1417 "53==0x%04x "
1418 "63==0x%04x "
1419 "64==0x%04x "
1420 "75==0x%04x \n",
0bd3300a
TH
1421 id[49],
1422 id[53],
1423 id[63],
1424 id[64],
1425 id[75]);
1da177e4
LT
1426 DPRINTK("80==0x%04x "
1427 "81==0x%04x "
1428 "82==0x%04x "
1429 "83==0x%04x "
1430 "84==0x%04x \n",
0bd3300a
TH
1431 id[80],
1432 id[81],
1433 id[82],
1434 id[83],
1435 id[84]);
1da177e4
LT
1436 DPRINTK("88==0x%04x "
1437 "93==0x%04x\n",
0bd3300a
TH
1438 id[88],
1439 id[93]);
1da177e4
LT
1440}
1441
cb95d562
TH
1442/**
1443 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1444 * @id: IDENTIFY data to compute xfer mask from
1445 *
1446 * Compute the xfermask for this device. This is not as trivial
1447 * as it seems if we must consider early devices correctly.
1448 *
1449 * FIXME: pre IDE drive timing (do we care ?).
1450 *
1451 * LOCKING:
1452 * None.
1453 *
1454 * RETURNS:
1455 * Computed xfermask
1456 */
1457static unsigned int ata_id_xfermask(const u16 *id)
1458{
1459 unsigned int pio_mask, mwdma_mask, udma_mask;
1460
1461 /* Usual case. Word 53 indicates word 64 is valid */
1462 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1463 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1464 pio_mask <<= 3;
1465 pio_mask |= 0x7;
1466 } else {
1467 /* If word 64 isn't valid then Word 51 high byte holds
1468 * the PIO timing number for the maximum. Turn it into
1469 * a mask.
1470 */
7a0f1c8a 1471 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1472 if (mode < 5) /* Valid PIO range */
2dcb407e 1473 pio_mask = (2 << mode) - 1;
46767aeb
AC
1474 else
1475 pio_mask = 1;
cb95d562
TH
1476
1477 /* But wait.. there's more. Design your standards by
1478 * committee and you too can get a free iordy field to
1479 * process. However its the speeds not the modes that
1480 * are supported... Note drivers using the timing API
1481 * will get this right anyway
1482 */
1483 }
1484
1485 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1486
b352e57d
AC
1487 if (ata_id_is_cfa(id)) {
1488 /*
1489 * Process compact flash extended modes
1490 */
1491 int pio = id[163] & 0x7;
1492 int dma = (id[163] >> 3) & 7;
1493
1494 if (pio)
1495 pio_mask |= (1 << 5);
1496 if (pio > 1)
1497 pio_mask |= (1 << 6);
1498 if (dma)
1499 mwdma_mask |= (1 << 3);
1500 if (dma > 1)
1501 mwdma_mask |= (1 << 4);
1502 }
1503
fb21f0d0
TH
1504 udma_mask = 0;
1505 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1506 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1507
1508 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1509}
1510
86e45b6b
TH
1511/**
1512 * ata_port_queue_task - Queue port_task
1513 * @ap: The ata_port to queue port_task for
e2a7f77a 1514 * @fn: workqueue function to be scheduled
65f27f38 1515 * @data: data for @fn to use
e2a7f77a 1516 * @delay: delay time for workqueue function
86e45b6b
TH
1517 *
1518 * Schedule @fn(@data) for execution after @delay jiffies using
1519 * port_task. There is one port_task per port and it's the
1520 * user(low level driver)'s responsibility to make sure that only
1521 * one task is active at any given time.
1522 *
1523 * libata core layer takes care of synchronization between
1524 * port_task and EH. ata_port_queue_task() may be ignored for EH
1525 * synchronization.
1526 *
1527 * LOCKING:
1528 * Inherited from caller.
1529 */
65f27f38 1530void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1531 unsigned long delay)
1532{
65f27f38
DH
1533 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1534 ap->port_task_data = data;
86e45b6b 1535
45a66c1c
ON
1536 /* may fail if ata_port_flush_task() in progress */
1537 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1538}
1539
1540/**
1541 * ata_port_flush_task - Flush port_task
1542 * @ap: The ata_port to flush port_task for
1543 *
1544 * After this function completes, port_task is guranteed not to
1545 * be running or scheduled.
1546 *
1547 * LOCKING:
1548 * Kernel thread context (may sleep)
1549 */
1550void ata_port_flush_task(struct ata_port *ap)
1551{
86e45b6b
TH
1552 DPRINTK("ENTER\n");
1553
45a66c1c 1554 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1555
0dd4b21f
BP
1556 if (ata_msg_ctl(ap))
1557 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1558}
1559
7102d230 1560static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1561{
77853bf2 1562 struct completion *waiting = qc->private_data;
a2a7a662 1563
a2a7a662 1564 complete(waiting);
a2a7a662
TH
1565}
1566
1567/**
2432697b 1568 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1569 * @dev: Device to which the command is sent
1570 * @tf: Taskfile registers for the command and the result
d69cf37d 1571 * @cdb: CDB for packet command
a2a7a662 1572 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1573 * @sgl: sg list for the data buffer of the command
2432697b 1574 * @n_elem: Number of sg entries
2b789108 1575 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1576 *
1577 * Executes libata internal command with timeout. @tf contains
1578 * command on entry and result on return. Timeout and error
1579 * conditions are reported via return value. No recovery action
1580 * is taken after a command times out. It's caller's duty to
1581 * clean up after timeout.
1582 *
1583 * LOCKING:
1584 * None. Should be called with kernel context, might sleep.
551e8889
TH
1585 *
1586 * RETURNS:
1587 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1588 */
2432697b
TH
1589unsigned ata_exec_internal_sg(struct ata_device *dev,
1590 struct ata_taskfile *tf, const u8 *cdb,
87260216 1591 int dma_dir, struct scatterlist *sgl,
2b789108 1592 unsigned int n_elem, unsigned long timeout)
a2a7a662 1593{
9af5c9c9
TH
1594 struct ata_link *link = dev->link;
1595 struct ata_port *ap = link->ap;
a2a7a662
TH
1596 u8 command = tf->command;
1597 struct ata_queued_cmd *qc;
2ab7db1f 1598 unsigned int tag, preempted_tag;
dedaf2b0 1599 u32 preempted_sactive, preempted_qc_active;
da917d69 1600 int preempted_nr_active_links;
60be6b9a 1601 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1602 unsigned long flags;
77853bf2 1603 unsigned int err_mask;
d95a717f 1604 int rc;
a2a7a662 1605
ba6a1308 1606 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1607
e3180499 1608 /* no internal command while frozen */
b51e9e5d 1609 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1610 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1611 return AC_ERR_SYSTEM;
1612 }
1613
2ab7db1f 1614 /* initialize internal qc */
a2a7a662 1615
2ab7db1f
TH
1616 /* XXX: Tag 0 is used for drivers with legacy EH as some
1617 * drivers choke if any other tag is given. This breaks
1618 * ata_tag_internal() test for those drivers. Don't use new
1619 * EH stuff without converting to it.
1620 */
1621 if (ap->ops->error_handler)
1622 tag = ATA_TAG_INTERNAL;
1623 else
1624 tag = 0;
1625
6cec4a39 1626 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1627 BUG();
f69499f4 1628 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1629
1630 qc->tag = tag;
1631 qc->scsicmd = NULL;
1632 qc->ap = ap;
1633 qc->dev = dev;
1634 ata_qc_reinit(qc);
1635
9af5c9c9
TH
1636 preempted_tag = link->active_tag;
1637 preempted_sactive = link->sactive;
dedaf2b0 1638 preempted_qc_active = ap->qc_active;
da917d69 1639 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1640 link->active_tag = ATA_TAG_POISON;
1641 link->sactive = 0;
dedaf2b0 1642 ap->qc_active = 0;
da917d69 1643 ap->nr_active_links = 0;
2ab7db1f
TH
1644
1645 /* prepare & issue qc */
a2a7a662 1646 qc->tf = *tf;
d69cf37d
TH
1647 if (cdb)
1648 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1649 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1650 qc->dma_dir = dma_dir;
1651 if (dma_dir != DMA_NONE) {
2432697b 1652 unsigned int i, buflen = 0;
87260216 1653 struct scatterlist *sg;
2432697b 1654
87260216
JA
1655 for_each_sg(sgl, sg, n_elem, i)
1656 buflen += sg->length;
2432697b 1657
87260216 1658 ata_sg_init(qc, sgl, n_elem);
49c80429 1659 qc->nbytes = buflen;
a2a7a662
TH
1660 }
1661
77853bf2 1662 qc->private_data = &wait;
a2a7a662
TH
1663 qc->complete_fn = ata_qc_complete_internal;
1664
8e0e694a 1665 ata_qc_issue(qc);
a2a7a662 1666
ba6a1308 1667 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1668
2b789108
TH
1669 if (!timeout)
1670 timeout = ata_probe_timeout * 1000 / HZ;
1671
1672 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f
TH
1673
1674 ata_port_flush_task(ap);
41ade50c 1675
d95a717f 1676 if (!rc) {
ba6a1308 1677 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1678
1679 /* We're racing with irq here. If we lose, the
1680 * following test prevents us from completing the qc
d95a717f
TH
1681 * twice. If we win, the port is frozen and will be
1682 * cleaned up by ->post_internal_cmd().
a2a7a662 1683 */
77853bf2 1684 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1685 qc->err_mask |= AC_ERR_TIMEOUT;
1686
1687 if (ap->ops->error_handler)
1688 ata_port_freeze(ap);
1689 else
1690 ata_qc_complete(qc);
f15a1daf 1691
0dd4b21f
BP
1692 if (ata_msg_warn(ap))
1693 ata_dev_printk(dev, KERN_WARNING,
88574551 1694 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1695 }
1696
ba6a1308 1697 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1698 }
1699
d95a717f
TH
1700 /* do post_internal_cmd */
1701 if (ap->ops->post_internal_cmd)
1702 ap->ops->post_internal_cmd(qc);
1703
a51d644a
TH
1704 /* perform minimal error analysis */
1705 if (qc->flags & ATA_QCFLAG_FAILED) {
1706 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1707 qc->err_mask |= AC_ERR_DEV;
1708
1709 if (!qc->err_mask)
1710 qc->err_mask |= AC_ERR_OTHER;
1711
1712 if (qc->err_mask & ~AC_ERR_OTHER)
1713 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1714 }
1715
15869303 1716 /* finish up */
ba6a1308 1717 spin_lock_irqsave(ap->lock, flags);
15869303 1718
e61e0672 1719 *tf = qc->result_tf;
77853bf2
TH
1720 err_mask = qc->err_mask;
1721
1722 ata_qc_free(qc);
9af5c9c9
TH
1723 link->active_tag = preempted_tag;
1724 link->sactive = preempted_sactive;
dedaf2b0 1725 ap->qc_active = preempted_qc_active;
da917d69 1726 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1727
1f7dd3e9
TH
1728 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1729 * Until those drivers are fixed, we detect the condition
1730 * here, fail the command with AC_ERR_SYSTEM and reenable the
1731 * port.
1732 *
1733 * Note that this doesn't change any behavior as internal
1734 * command failure results in disabling the device in the
1735 * higher layer for LLDDs without new reset/EH callbacks.
1736 *
1737 * Kill the following code as soon as those drivers are fixed.
1738 */
198e0fed 1739 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1740 err_mask |= AC_ERR_SYSTEM;
1741 ata_port_probe(ap);
1742 }
1743
ba6a1308 1744 spin_unlock_irqrestore(ap->lock, flags);
15869303 1745
77853bf2 1746 return err_mask;
a2a7a662
TH
1747}
1748
2432697b 1749/**
33480a0e 1750 * ata_exec_internal - execute libata internal command
2432697b
TH
1751 * @dev: Device to which the command is sent
1752 * @tf: Taskfile registers for the command and the result
1753 * @cdb: CDB for packet command
1754 * @dma_dir: Data tranfer direction of the command
1755 * @buf: Data buffer of the command
1756 * @buflen: Length of data buffer
2b789108 1757 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1758 *
1759 * Wrapper around ata_exec_internal_sg() which takes simple
1760 * buffer instead of sg list.
1761 *
1762 * LOCKING:
1763 * None. Should be called with kernel context, might sleep.
1764 *
1765 * RETURNS:
1766 * Zero on success, AC_ERR_* mask on failure
1767 */
1768unsigned ata_exec_internal(struct ata_device *dev,
1769 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1770 int dma_dir, void *buf, unsigned int buflen,
1771 unsigned long timeout)
2432697b 1772{
33480a0e
TH
1773 struct scatterlist *psg = NULL, sg;
1774 unsigned int n_elem = 0;
2432697b 1775
33480a0e
TH
1776 if (dma_dir != DMA_NONE) {
1777 WARN_ON(!buf);
1778 sg_init_one(&sg, buf, buflen);
1779 psg = &sg;
1780 n_elem++;
1781 }
2432697b 1782
2b789108
TH
1783 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1784 timeout);
2432697b
TH
1785}
1786
977e6b9f
TH
1787/**
1788 * ata_do_simple_cmd - execute simple internal command
1789 * @dev: Device to which the command is sent
1790 * @cmd: Opcode to execute
1791 *
1792 * Execute a 'simple' command, that only consists of the opcode
1793 * 'cmd' itself, without filling any other registers
1794 *
1795 * LOCKING:
1796 * Kernel thread context (may sleep).
1797 *
1798 * RETURNS:
1799 * Zero on success, AC_ERR_* mask on failure
e58eb583 1800 */
77b08fb5 1801unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1802{
1803 struct ata_taskfile tf;
e58eb583
TH
1804
1805 ata_tf_init(dev, &tf);
1806
1807 tf.command = cmd;
1808 tf.flags |= ATA_TFLAG_DEVICE;
1809 tf.protocol = ATA_PROT_NODATA;
1810
2b789108 1811 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1812}
1813
1bc4ccff
AC
1814/**
1815 * ata_pio_need_iordy - check if iordy needed
1816 * @adev: ATA device
1817 *
1818 * Check if the current speed of the device requires IORDY. Used
1819 * by various controllers for chip configuration.
1820 */
a617c09f 1821
1bc4ccff
AC
1822unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1823{
432729f0
AC
1824 /* Controller doesn't support IORDY. Probably a pointless check
1825 as the caller should know this */
9af5c9c9 1826 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1827 return 0;
432729f0
AC
1828 /* PIO3 and higher it is mandatory */
1829 if (adev->pio_mode > XFER_PIO_2)
1830 return 1;
1831 /* We turn it on when possible */
1832 if (ata_id_has_iordy(adev->id))
1bc4ccff 1833 return 1;
432729f0
AC
1834 return 0;
1835}
2e9edbf8 1836
432729f0
AC
1837/**
1838 * ata_pio_mask_no_iordy - Return the non IORDY mask
1839 * @adev: ATA device
1840 *
1841 * Compute the highest mode possible if we are not using iordy. Return
1842 * -1 if no iordy mode is available.
1843 */
a617c09f 1844
432729f0
AC
1845static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1846{
1bc4ccff 1847 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1848 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1849 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1850 /* Is the speed faster than the drive allows non IORDY ? */
1851 if (pio) {
1852 /* This is cycle times not frequency - watch the logic! */
1853 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1854 return 3 << ATA_SHIFT_PIO;
1855 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1856 }
1857 }
432729f0 1858 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1859}
1860
1da177e4 1861/**
49016aca 1862 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1863 * @dev: target device
1864 * @p_class: pointer to class of the target device (may be changed)
bff04647 1865 * @flags: ATA_READID_* flags
fe635c7e 1866 * @id: buffer to read IDENTIFY data into
1da177e4 1867 *
49016aca
TH
1868 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1869 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1870 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1871 * for pre-ATA4 drives.
1da177e4 1872 *
50a99018 1873 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1874 * now we abort if we hit that case.
50a99018 1875 *
1da177e4 1876 * LOCKING:
49016aca
TH
1877 * Kernel thread context (may sleep)
1878 *
1879 * RETURNS:
1880 * 0 on success, -errno otherwise.
1da177e4 1881 */
a9beec95 1882int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1883 unsigned int flags, u16 *id)
1da177e4 1884{
9af5c9c9 1885 struct ata_port *ap = dev->link->ap;
49016aca 1886 unsigned int class = *p_class;
a0123703 1887 struct ata_taskfile tf;
49016aca
TH
1888 unsigned int err_mask = 0;
1889 const char *reason;
54936f8b 1890 int may_fallback = 1, tried_spinup = 0;
49016aca 1891 int rc;
1da177e4 1892
0dd4b21f 1893 if (ata_msg_ctl(ap))
44877b4e 1894 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1895
49016aca 1896 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 1897 retry:
3373efd8 1898 ata_tf_init(dev, &tf);
a0123703 1899
49016aca
TH
1900 switch (class) {
1901 case ATA_DEV_ATA:
a0123703 1902 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1903 break;
1904 case ATA_DEV_ATAPI:
a0123703 1905 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1906 break;
1907 default:
1908 rc = -ENODEV;
1909 reason = "unsupported class";
1910 goto err_out;
1da177e4
LT
1911 }
1912
a0123703 1913 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1914
1915 /* Some devices choke if TF registers contain garbage. Make
1916 * sure those are properly initialized.
1917 */
1918 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1919
1920 /* Device presence detection is unreliable on some
1921 * controllers. Always poll IDENTIFY if available.
1922 */
1923 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1924
3373efd8 1925 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 1926 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 1927 if (err_mask) {
800b3996 1928 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 1929 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 1930 ap->print_id, dev->devno);
55a8e2c8
TH
1931 return -ENOENT;
1932 }
1933
54936f8b
TH
1934 /* Device or controller might have reported the wrong
1935 * device class. Give a shot at the other IDENTIFY if
1936 * the current one is aborted by the device.
1937 */
1938 if (may_fallback &&
1939 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1940 may_fallback = 0;
1941
1942 if (class == ATA_DEV_ATA)
1943 class = ATA_DEV_ATAPI;
1944 else
1945 class = ATA_DEV_ATA;
1946 goto retry;
1947 }
1948
49016aca
TH
1949 rc = -EIO;
1950 reason = "I/O error";
1da177e4
LT
1951 goto err_out;
1952 }
1953
54936f8b
TH
1954 /* Falling back doesn't make sense if ID data was read
1955 * successfully at least once.
1956 */
1957 may_fallback = 0;
1958
49016aca 1959 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1960
49016aca 1961 /* sanity check */
a4f5749b 1962 rc = -EINVAL;
6070068b 1963 reason = "device reports invalid type";
a4f5749b
TH
1964
1965 if (class == ATA_DEV_ATA) {
1966 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1967 goto err_out;
1968 } else {
1969 if (ata_id_is_ata(id))
1970 goto err_out;
49016aca
TH
1971 }
1972
169439c2
ML
1973 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1974 tried_spinup = 1;
1975 /*
1976 * Drive powered-up in standby mode, and requires a specific
1977 * SET_FEATURES spin-up subcommand before it will accept
1978 * anything other than the original IDENTIFY command.
1979 */
218f3d30 1980 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 1981 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1982 rc = -EIO;
1983 reason = "SPINUP failed";
1984 goto err_out;
1985 }
1986 /*
1987 * If the drive initially returned incomplete IDENTIFY info,
1988 * we now must reissue the IDENTIFY command.
1989 */
1990 if (id[2] == 0x37c8)
1991 goto retry;
1992 }
1993
bff04647 1994 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1995 /*
1996 * The exact sequence expected by certain pre-ATA4 drives is:
1997 * SRST RESET
50a99018
AC
1998 * IDENTIFY (optional in early ATA)
1999 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
2000 * anything else..
2001 * Some drives were very specific about that exact sequence.
50a99018
AC
2002 *
2003 * Note that ATA4 says lba is mandatory so the second check
2004 * shoud never trigger.
49016aca
TH
2005 */
2006 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2007 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2008 if (err_mask) {
2009 rc = -EIO;
2010 reason = "INIT_DEV_PARAMS failed";
2011 goto err_out;
2012 }
2013
2014 /* current CHS translation info (id[53-58]) might be
2015 * changed. reread the identify device info.
2016 */
bff04647 2017 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2018 goto retry;
2019 }
2020 }
2021
2022 *p_class = class;
fe635c7e 2023
49016aca
TH
2024 return 0;
2025
2026 err_out:
88574551 2027 if (ata_msg_warn(ap))
0dd4b21f 2028 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2029 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2030 return rc;
2031}
2032
3373efd8 2033static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2034{
9af5c9c9
TH
2035 struct ata_port *ap = dev->link->ap;
2036 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2037}
2038
/* ata_dev_config_ncq - configure NCQ for @dev and build a description.
 * @dev: device to configure
 * @desc: buffer receiving a human-readable NCQ description for dmesg
 * @desc_sz: size of @desc
 *
 * Sets ATA_DFLAG_NCQ when both the device (per IDENTIFY) and the port
 * (ATA_FLAG_NCQ) support NCQ, unless the device carries the NONCQ
 * blacklist horkage.
 */
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	/* ddepth: queue depth advertised by the device's IDENTIFY data */
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		/* device doesn't support NCQ at all */
		desc[0] = '\0';
		return;
	}
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		/* blacklisted: capability present but deliberately unused */
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		/* host-side depth is capped by the SCSI host's can_queue
		 * and by ATA_MAX_QUEUE - 1 (one tag reserved) */
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* report "depth X" when host can match the device, "X/Y" otherwise */
	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
2063
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	/* only chatter to dmesg when EH asked for probe-time info */
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters; everything below is
	 * recomputed from the (possibly re-read) IDENTIFY data */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				/* NOTE(review): "accessable" is a typo in a
				 * user-visible string; left as-is since fixing
				 * it changes dmesg output */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* id word 59 bit 8 set => low byte is the current
		 * multi-sector transfer count */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				/* FLUSH CACHE EXT only matters past the
				 * 28-bit boundary */
				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* link power management capability flags, unless blacklisted */
	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	/* tape drives get their own, smaller transfer cap */
	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE))
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLD a final say in device configuration */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			       __FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2338
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.  Suitable as a ->cable_detect hook.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2351
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.  Suitable as a ->cable_detect hook.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2364
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *	Suitable as a ->cable_detect hook.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2376
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *	Suitable as a ->cable_detect hook.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2388
/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];	/* per-device retry budget */
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		/* class is restored from classes[] below, per retry budget */
		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* succeed if at least one device survived probing */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* dev points at the device that failed */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fall through */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2537
/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2553
/**
 *	sata_print_link_status - Print SATA link status
 *	@link: SATA link to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *	Silently does nothing if SStatus cannot be read.
 *
 *	LOCKING:
 *	None.
 */
void sata_print_link_status(struct ata_link *link)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(link, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(link, SCR_CONTROL, &scontrol);

	if (ata_link_online(link)) {
		/* SStatus bits 7:4 = current negotiated speed (SPD) */
		tmp = (sstatus >> 4) & 0xf;
		ata_link_printk(link, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_link_printk(link, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
2582
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.  On success the port is marked
 *	probed and @ap->cbl set to ATA_CBL_SATA; on failure the
 *	port is disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);	/* 5 s PHY-ready wait */
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset: SControl DET=1 (reset), SPD/IPM per 0x301 */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset: DET=0 */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary;
	 * DET field (sstatus & 0xf) == 1 means device presence w/o PHY comm */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device behind the link to go un-busy */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2638
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.  Returns early without probing
 *	if the PHY reset left the port disabled.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
2657
/**
 *	ata_dev_pair		-	return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or if none is
 *	present NULL is returned.  The pair is the device at the
 *	opposite devno (master/slave) on @adev's link.
 */

struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_link *link = adev->link;
	/* devno is 0 or 1; 1 - devno selects the other position */
	struct ata_device *pair = &link->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}
2674
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2694
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;		/* already at the lowest speed */

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2753
936fd732 2754static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2755{
5270222f
TH
2756 struct ata_link *host_link = &link->ap->link;
2757 u32 limit, target, spd;
1c3fae4d 2758
5270222f
TH
2759 limit = link->sata_spd_limit;
2760
2761 /* Don't configure downstream link faster than upstream link.
2762 * It doesn't speed up anything and some PMPs choke on such
2763 * configuration.
2764 */
2765 if (!ata_is_host_link(link) && host_link->sata_spd)
2766 limit &= (1 << host_link->sata_spd) - 1;
2767
2768 if (limit == UINT_MAX)
2769 target = 0;
1c3fae4d 2770 else
5270222f 2771 target = fls(limit);
1c3fae4d
TH
2772
2773 spd = (*scontrol >> 4) & 0xf;
5270222f 2774 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2775
5270222f 2776 return spd != target;
1c3fae4d
TH
2777}
2778
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* unreadable SControl: conservatively report "needed" */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}
2803
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* __sata_set_spd_needed() also patches scontrol with the new SPD */
	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
2833
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 *
 * Columns: mode, setup, act8b, rec8b, cyc8b, active, recover, cycle, udma.
 * The table is terminated by a sentinel entry with mode 0xFF
 * (see ata_timing_find_mode()).
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};
2882
/* ENOUGH(v, unit): smallest count of @unit clocks covering @v ns
 * (ceiling division).  EZ additionally maps 0 -> 0 ("not specified").
 * NOTE: both macros evaluate @v more than once -- safe here because
 * all call sites pass side-effect-free expressions. */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)

/* Convert nanosecond timings in @t to bus-clock counts in @q.
 * @T is the PIO/MWDMA clock period and @UT the UDMA clock period,
 * both in internal units (hence the * 1000 scaling). */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}
2897
/* Merge timings @a and @b into @m by taking the slower (max) value of
 * each field selected by the ATA_TIMING_* bits in @what.  Fields not
 * selected are left untouched in @m (callers typically pass @b == @m). */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2910
2dcb407e 2911static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
452503f9
AC
2912{
2913 const struct ata_timing *t;
2914
2915 for (t = ata_timing; t->mode != speed; t++)
91190758 2916 if (t->mode == 0xFF)
452503f9 2917 return NULL;
2e9edbf8 2918 return t;
452503f9
AC
2919}
2920
/* ata_timing_compute - compute quantized bus timings for @adev at @speed.
 * @adev: target device (EIDE timing words may tighten the result)
 * @speed: XFER_* transfer mode to compute timings for
 * @t: output timing structure (bus-clock counts)
 * @T: PIO/MWDMA clock period, @UT: UDMA clock period
 *
 * Returns 0 on success, -EINVAL if @speed is not in the timing table.
 */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		/* take the slower of table vs. drive-requested cycle times */
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2991
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is a flag OR'd into the selector; strip it
	 * so the switch below sees only the bare selector value. */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the fastest remaining PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the fastest remaining DMA mode, preferring to
		 * trim UDMA before falling back to trimming MWDMA */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* limit UDMA to modes safe on a 40-wire cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* refuse to leave the device with no PIO mode, or to report
	 * success when nothing actually changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3080
/* Issue SET FEATURES - XFER MODE to @dev for its currently selected
 * xfer_mode/xfer_shift, filtering out known-bogus device errors, then
 * revalidate the device.  Returns 0 on success, negative errno on
 * failure.  Called with EH context (may sleep).
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	/* ATA_DFLAG_PIO tracks whether the final mode is PIO */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* re-read IDENTIFY data; POST_SETMODE tells EH why */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
3129
1da177e4 3130/**
04351821 3131 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3132 * @link: link on which timings will be programmed
e82cbdb9 3133 * @r_failed_dev: out paramter for failed device
1da177e4 3134 *
04351821
AC
3135 * Standard implementation of the function used to tune and set
3136 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3137 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3138 * returned in @r_failed_dev.
780a87f7 3139 *
1da177e4 3140 * LOCKING:
0cba632b 3141 * PCI/etc. bus probe sem.
e82cbdb9
TH
3142 *
3143 * RETURNS:
3144 * 0 on success, negative errno otherwise
1da177e4 3145 */
04351821 3146
0260731f 3147int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3148{
0260731f 3149 struct ata_port *ap = link->ap;
e8e0619f 3150 struct ata_device *dev;
f58229f8 3151 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3152
a6d5a51c 3153 /* step 1: calculate xfer_mask */
f58229f8 3154 ata_link_for_each_dev(dev, link) {
acf356b1 3155 unsigned int pio_mask, dma_mask;
b3a70601 3156 unsigned int mode_mask;
a6d5a51c 3157
e1211e3f 3158 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3159 continue;
3160
b3a70601
AC
3161 mode_mask = ATA_DMA_MASK_ATA;
3162 if (dev->class == ATA_DEV_ATAPI)
3163 mode_mask = ATA_DMA_MASK_ATAPI;
3164 else if (ata_id_is_cfa(dev->id))
3165 mode_mask = ATA_DMA_MASK_CFA;
3166
3373efd8 3167 ata_dev_xfermask(dev);
1da177e4 3168
acf356b1
TH
3169 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3170 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3171
3172 if (libata_dma_mask & mode_mask)
3173 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3174 else
3175 dma_mask = 0;
3176
acf356b1
TH
3177 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3178 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3179
4f65977d 3180 found = 1;
5444a6f4
AC
3181 if (dev->dma_mode)
3182 used_dma = 1;
a6d5a51c 3183 }
4f65977d 3184 if (!found)
e82cbdb9 3185 goto out;
a6d5a51c
TH
3186
3187 /* step 2: always set host PIO timings */
f58229f8 3188 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3189 if (!ata_dev_enabled(dev))
3190 continue;
3191
3192 if (!dev->pio_mode) {
f15a1daf 3193 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3194 rc = -EINVAL;
e82cbdb9 3195 goto out;
e8e0619f
TH
3196 }
3197
3198 dev->xfer_mode = dev->pio_mode;
3199 dev->xfer_shift = ATA_SHIFT_PIO;
3200 if (ap->ops->set_piomode)
3201 ap->ops->set_piomode(ap, dev);
3202 }
1da177e4 3203
a6d5a51c 3204 /* step 3: set host DMA timings */
f58229f8 3205 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3206 if (!ata_dev_enabled(dev) || !dev->dma_mode)
3207 continue;
3208
3209 dev->xfer_mode = dev->dma_mode;
3210 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3211 if (ap->ops->set_dmamode)
3212 ap->ops->set_dmamode(ap, dev);
3213 }
1da177e4
LT
3214
3215 /* step 4: update devices' xfer mode */
f58229f8 3216 ata_link_for_each_dev(dev, link) {
18d90deb 3217 /* don't update suspended devices' xfer mode */
9666f400 3218 if (!ata_dev_enabled(dev))
83206a29
TH
3219 continue;
3220
3373efd8 3221 rc = ata_dev_set_mode(dev);
5bbc53f4 3222 if (rc)
e82cbdb9 3223 goto out;
83206a29 3224 }
1da177e4 3225
e8e0619f
TH
3226 /* Record simplex status. If we selected DMA then the other
3227 * host channels are not permitted to do so.
5444a6f4 3228 */
cca3974e 3229 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3230 ap->host->simplex_claimed = ap;
5444a6f4 3231
e82cbdb9
TH
3232 out:
3233 if (rc)
3234 *r_failed_dev = dev;
3235 return rc;
1da177e4
LT
3236}
3237
04351821
AC
3238/**
3239 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3240 * @link: link on which timings will be programmed
04351821
AC
3241 * @r_failed_dev: out paramter for failed device
3242 *
3243 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3244 * ata_set_mode() fails, pointer to the failing device is
3245 * returned in @r_failed_dev.
3246 *
3247 * LOCKING:
3248 * PCI/etc. bus probe sem.
3249 *
3250 * RETURNS:
3251 * 0 on success, negative errno otherwise
3252 */
0260731f 3253int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 3254{
0260731f
TH
3255 struct ata_port *ap = link->ap;
3256
04351821
AC
3257 /* has private set_mode? */
3258 if (ap->ops->set_mode)
0260731f
TH
3259 return ap->ops->set_mode(link, r_failed_dev);
3260 return ata_do_set_mode(link, r_failed_dev);
04351821
AC
3261}
3262
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* order matters: registers must be loaded before the command
	 * register write that actually starts execution */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3282
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies) before warning
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	/* 0xff means no device / floating bus; stop polling early */
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	/* second phase: keep polling until the overall timeout */
	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3337
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	/* never extend the caller's deadline; only shorten the 0xff wait */
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 *
	 * Note that some PATA controllers (pata_ali) explode if
	 * status register is read more than once when there's no
	 * device attached, so the 0xff polling below is SATA-only.
	 */
	if (ap->flags & ATA_FLAG_SATA) {
		while (1) {
			u8 status = ata_chk_status(ap);

			if (status != 0xff || time_after(jiffies, deadline))
				return;

			msleep(50);
		}
	}
}
3392
d4b2bab4
TH
3393/**
3394 * ata_wait_ready - sleep until BSY clears, or timeout
3395 * @ap: port containing status register to be polled
3396 * @deadline: deadline jiffies for the operation
3397 *
3398 * Sleep until ATA Status register bit BSY clears, or timeout
3399 * occurs.
3400 *
3401 * LOCKING:
3402 * Kernel thread context (may sleep).
3403 *
3404 * RETURNS:
3405 * 0 on success, -errno otherwise.
3406 */
3407int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3408{
3409 unsigned long start = jiffies;
3410 int warned = 0;
3411
3412 while (1) {
3413 u8 status = ata_chk_status(ap);
3414 unsigned long now = jiffies;
3415
3416 if (!(status & ATA_BUSY))
3417 return 0;
936fd732 3418 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3419 return -ENODEV;
3420 if (time_after(now, deadline))
3421 return -EBUSY;
3422
3423 if (!warned && time_after(now, start + 5 * HZ) &&
3424 (deadline - now > 3 * HZ)) {
3425 ata_port_printk(ap, KERN_WARNING,
3426 "port is slow to respond, please be patient "
3427 "(Status 0x%x)\n", status);
3428 warned = 1;
3429 }
3430
3431 msleep(50);
3432 }
3433}
3434
/* After a bus reset, wait for the device(s) named in @devmask to come
 * out of BSY and re-select device 0.  -ENODEV from a missing device is
 * remembered but does not abort waiting for the other device; any other
 * error returns immediately.  Returns 0 or the last -errno.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			/* signature values after reset per ATA spec */
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3494
/* Pulse SRST in the device control register to soft-reset the bus,
 * then wait for the devices in @devmask to become ready.  Returns 0,
 * -ENODEV if the bus reads 0xFF (nothing attached), or another -errno
 * from the post-reset wait.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3521
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present; on SATA-reset ports
	 * device 0 is assumed present without probing */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; -ENODEV (empty bus) is not fatal here */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3609
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the sooner of @deadline and @timeout */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field of SStatus matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 may be a stuck value; keep waiting
			 * until the deadline before accepting it */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3678
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* clear DET (bits 3:0), keep SPD, set IPM to disallow
	 * partial/slumber power states (0x300) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3714
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: some links need a hardreset to come
	 * back after suspend */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		/* a BSY timeout here escalates to hardreset instead
		 * of failing the whole reset sequence */
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3777
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing attached, report success with no device */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices;
	 * err==0x81 from classifying dev0 means dev1 is absent */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3837
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET=4: disable phy */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset: DET=1 performs COMRESET */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3897
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3965
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear); best effort, ignore failure */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
4010
623a3128
TH
4011/**
4012 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
4013 * @dev: device to compare against
4014 * @new_class: class of the new device
4015 * @new_id: IDENTIFY page of the new device
4016 *
4017 * Compare @new_class and @new_id against @dev and determine
4018 * whether @dev is the device indicated by @new_class and
4019 * @new_id.
4020 *
4021 * LOCKING:
4022 * None.
4023 *
4024 * RETURNS:
4025 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4026 */
3373efd8
TH
4027static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4028 const u16 *new_id)
623a3128
TH
4029{
4030 const u16 *old_id = dev->id;
a0cf733b
TH
4031 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4032 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4033
4034 if (dev->class != new_class) {
f15a1daf
TH
4035 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4036 dev->class, new_class);
623a3128
TH
4037 return 0;
4038 }
4039
a0cf733b
TH
4040 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4041 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4042 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4043 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4044
4045 if (strcmp(model[0], model[1])) {
f15a1daf
TH
4046 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4047 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
4048 return 0;
4049 }
4050
4051 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
4052 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4053 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
4054 return 0;
4055 }
4056
623a3128
TH
4057 return 1;
4058}
4059
4060/**
fe30911b 4061 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4062 * @dev: target ATA device
bff04647 4063 * @readid_flags: read ID flags
623a3128
TH
4064 *
4065 * Re-read IDENTIFY page and make sure @dev is still attached to
4066 * the port.
4067 *
4068 * LOCKING:
4069 * Kernel thread context (may sleep)
4070 *
4071 * RETURNS:
4072 * 0 on success, negative errno otherwise
4073 */
fe30911b 4074int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4075{
5eb45c02 4076 unsigned int class = dev->class;
9af5c9c9 4077 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4078 int rc;
4079
fe635c7e 4080 /* read ID data */
bff04647 4081 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4082 if (rc)
fe30911b 4083 return rc;
623a3128
TH
4084
4085 /* is the device still there? */
fe30911b
TH
4086 if (!ata_dev_same_device(dev, class, id))
4087 return -ENODEV;
623a3128 4088
fe635c7e 4089 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4090 return 0;
4091}
4092
4093/**
4094 * ata_dev_revalidate - Revalidate ATA device
4095 * @dev: device to revalidate
422c9daa 4096 * @new_class: new class code
fe30911b
TH
4097 * @readid_flags: read ID flags
4098 *
4099 * Re-read IDENTIFY page, make sure @dev is still attached to the
4100 * port and reconfigure it according to the new IDENTIFY page.
4101 *
4102 * LOCKING:
4103 * Kernel thread context (may sleep)
4104 *
4105 * RETURNS:
4106 * 0 on success, negative errno otherwise
4107 */
422c9daa
TH
4108int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
4109 unsigned int readid_flags)
fe30911b 4110{
6ddcd3b0 4111 u64 n_sectors = dev->n_sectors;
fe30911b
TH
4112 int rc;
4113
4114 if (!ata_dev_enabled(dev))
4115 return -ENODEV;
4116
422c9daa
TH
4117 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
4118 if (ata_class_enabled(new_class) &&
4119 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
4120 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
4121 dev->class, new_class);
4122 rc = -ENODEV;
4123 goto fail;
4124 }
4125
fe30911b
TH
4126 /* re-read ID */
4127 rc = ata_dev_reread_id(dev, readid_flags);
4128 if (rc)
4129 goto fail;
623a3128
TH
4130
4131 /* configure device according to the new ID */
efdaedc4 4132 rc = ata_dev_configure(dev);
6ddcd3b0
TH
4133 if (rc)
4134 goto fail;
4135
4136 /* verify n_sectors hasn't changed */
b54eebd6
TH
4137 if (dev->class == ATA_DEV_ATA && n_sectors &&
4138 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
4139 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4140 "%llu != %llu\n",
4141 (unsigned long long)n_sectors,
4142 (unsigned long long)dev->n_sectors);
8270bec4
TH
4143
4144 /* restore original n_sectors */
4145 dev->n_sectors = n_sectors;
4146
6ddcd3b0
TH
4147 rc = -ENODEV;
4148 goto fail;
4149 }
4150
4151 return 0;
623a3128
TH
4152
4153 fail:
f15a1daf 4154 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4155 return rc;
4156}
4157
6919a0a6
AC
4158struct ata_blacklist_entry {
4159 const char *model_num;
4160 const char *model_rev;
4161 unsigned long horkage;
4162};
4163
4164static const struct ata_blacklist_entry ata_device_blacklist [] = {
4165 /* Devices with DMA related problems under Linux */
4166 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4167 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4168 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4169 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4170 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4171 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4172 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4173 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4174 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4175 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4176 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4177 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4178 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4179 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4180 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4181 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4182 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4183 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4184 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4185 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4186 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4187 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4188 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4189 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4190 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4191 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4192 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4193 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4194 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4195 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
4196 /* Odd clown on sil3726/4726 PMPs */
4197 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4198 ATA_HORKAGE_SKIP_PM },
6919a0a6 4199
18d6e9d5 4200 /* Weird ATAPI devices */
40a1d531 4201 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 4202
6919a0a6
AC
4203 /* Devices we expect to fail diagnostics */
4204
4205 /* Devices where NCQ should be avoided */
4206 /* NCQ is slow */
2dcb407e 4207 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
4208 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4209 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4210 /* NCQ is broken */
539cc7c7 4211 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4212 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
0b0a43e0
DM
4213 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
4214 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
da6f0ec2 4215 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 4216
36e337d0
RH
4217 /* Blacklist entries taken from Silicon Image 3124/3132
4218 Windows driver .inf file - also several Linux problem reports */
4219 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4220 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4221 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
4222 /* Drives which do spurious command completion */
4223 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb 4224 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
70edb185 4225 { "HDT722516DLA380", "V43OA96A", ATA_HORKAGE_NONCQ, },
e14cbfa6 4226 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
0c173174 4227 { "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
2f8fcebb 4228 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
7f567620 4229 { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
a520f261 4230 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
7f567620 4231 { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3fb6589c 4232 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
954bb005 4233 { "ST9160821AS", "3.ALD", ATA_HORKAGE_NONCQ, },
13587960 4234 { "ST9160821AS", "3.CCD", ATA_HORKAGE_NONCQ, },
7f567620
TH
4235 { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
4236 { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
5d6aca8d 4237 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
12850ffe 4238 { "Maxtor 7V300F0", "VA111900", ATA_HORKAGE_NONCQ, },
6919a0a6 4239
16c55b03
TH
4240 /* devices which puke on READ_NATIVE_MAX */
4241 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4242 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4243 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4244 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4245
93328e11
AC
4246 /* Devices which report 1 sector over size HPA */
4247 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4248 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4249
6bbfd53d
AC
4250 /* Devices which get the IVB wrong */
4251 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4252 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
4253
6919a0a6
AC
4254 /* End Marker */
4255 { }
1da177e4 4256};
2e9edbf8 4257
741b7763 4258static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
539cc7c7
JG
4259{
4260 const char *p;
4261 int len;
4262
4263 /*
4264 * check for trailing wildcard: *\0
4265 */
4266 p = strchr(patt, wildchar);
4267 if (p && ((*(p + 1)) == 0))
4268 len = p - patt;
317b50b8 4269 else {
539cc7c7 4270 len = strlen(name);
317b50b8
AP
4271 if (!len) {
4272 if (!*patt)
4273 return 0;
4274 return -1;
4275 }
4276 }
539cc7c7
JG
4277
4278 return strncmp(patt, name, len);
4279}
4280
75683fe7 4281static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4282{
8bfa79fc
TH
4283 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4284 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4285 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4286
8bfa79fc
TH
4287 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4288 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4289
6919a0a6 4290 while (ad->model_num) {
539cc7c7 4291 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4292 if (ad->model_rev == NULL)
4293 return ad->horkage;
539cc7c7 4294 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4295 return ad->horkage;
f4b15fef 4296 }
6919a0a6 4297 ad++;
f4b15fef 4298 }
1da177e4
LT
4299 return 0;
4300}
4301
6919a0a6
AC
4302static int ata_dma_blacklisted(const struct ata_device *dev)
4303{
4304 /* We don't support polling DMA.
4305 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4306 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4307 */
9af5c9c9 4308 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4309 (dev->flags & ATA_DFLAG_CDB_INTR))
4310 return 1;
75683fe7 4311 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4312}
4313
6bbfd53d
AC
4314/**
4315 * ata_is_40wire - check drive side detection
4316 * @dev: device
4317 *
4318 * Perform drive side detection decoding, allowing for device vendors
4319 * who can't follow the documentation.
4320 */
4321
4322static int ata_is_40wire(struct ata_device *dev)
4323{
4324 if (dev->horkage & ATA_HORKAGE_IVB)
4325 return ata_drive_40wire_relaxed(dev->id);
4326 return ata_drive_40wire(dev->id);
4327}
4328
a6d5a51c
TH
4329/**
4330 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4331 * @dev: Device to compute xfermask for
4332 *
acf356b1
TH
4333 * Compute supported xfermask of @dev and store it in
4334 * dev->*_mask. This function is responsible for applying all
4335 * known limits including host controller limits, device
4336 * blacklist, etc...
a6d5a51c
TH
4337 *
4338 * LOCKING:
4339 * None.
a6d5a51c 4340 */
3373efd8 4341static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4342{
9af5c9c9
TH
4343 struct ata_link *link = dev->link;
4344 struct ata_port *ap = link->ap;
cca3974e 4345 struct ata_host *host = ap->host;
a6d5a51c 4346 unsigned long xfer_mask;
1da177e4 4347
37deecb5 4348 /* controller modes available */
565083e1
TH
4349 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4350 ap->mwdma_mask, ap->udma_mask);
4351
8343f889 4352 /* drive modes available */
37deecb5
TH
4353 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4354 dev->mwdma_mask, dev->udma_mask);
4355 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4356
b352e57d
AC
4357 /*
4358 * CFA Advanced TrueIDE timings are not allowed on a shared
4359 * cable
4360 */
4361 if (ata_dev_pair(dev)) {
4362 /* No PIO5 or PIO6 */
4363 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4364 /* No MWDMA3 or MWDMA 4 */
4365 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4366 }
4367
37deecb5
TH
4368 if (ata_dma_blacklisted(dev)) {
4369 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4370 ata_dev_printk(dev, KERN_WARNING,
4371 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4372 }
a6d5a51c 4373
14d66ab7 4374 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4375 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4376 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4377 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4378 "other device, disabling DMA\n");
5444a6f4 4379 }
565083e1 4380
e424675f
JG
4381 if (ap->flags & ATA_FLAG_NO_IORDY)
4382 xfer_mask &= ata_pio_mask_no_iordy(dev);
4383
5444a6f4 4384 if (ap->ops->mode_filter)
a76b62ca 4385 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4386
8343f889
RH
4387 /* Apply cable rule here. Don't apply it early because when
4388 * we handle hot plug the cable type can itself change.
4389 * Check this last so that we know if the transfer rate was
4390 * solely limited by the cable.
4391 * Unknown or 80 wire cables reported host side are checked
4392 * drive side as well. Cases where we know a 40wire cable
4393 * is used safely for 80 are not checked here.
4394 */
4395 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4396 /* UDMA/44 or higher would be available */
2dcb407e 4397 if ((ap->cbl == ATA_CBL_PATA40) ||
6bbfd53d 4398 (ata_is_40wire(dev) &&
2dcb407e
JG
4399 (ap->cbl == ATA_CBL_PATA_UNK ||
4400 ap->cbl == ATA_CBL_PATA80))) {
4401 ata_dev_printk(dev, KERN_WARNING,
8343f889
RH
4402 "limited to UDMA/33 due to 40-wire cable\n");
4403 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4404 }
4405
565083e1
TH
4406 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4407 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4408}
4409
1da177e4
LT
4410/**
4411 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4412 * @dev: Device to which command will be sent
4413 *
780a87f7
JG
4414 * Issue SET FEATURES - XFER MODE command to device @dev
4415 * on port @ap.
4416 *
1da177e4 4417 * LOCKING:
0cba632b 4418 * PCI/etc. bus probe sem.
83206a29
TH
4419 *
4420 * RETURNS:
4421 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4422 */
4423
3373efd8 4424static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4425{
a0123703 4426 struct ata_taskfile tf;
83206a29 4427 unsigned int err_mask;
1da177e4
LT
4428
4429 /* set up set-features taskfile */
4430 DPRINTK("set features - xfer mode\n");
4431
464cf177
TH
4432 /* Some controllers and ATAPI devices show flaky interrupt
4433 * behavior after setting xfer mode. Use polling instead.
4434 */
3373efd8 4435 ata_tf_init(dev, &tf);
a0123703
TH
4436 tf.command = ATA_CMD_SET_FEATURES;
4437 tf.feature = SETFEATURES_XFER;
464cf177 4438 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4439 tf.protocol = ATA_PROT_NODATA;
4440 tf.nsect = dev->xfer_mode;
1da177e4 4441
2b789108 4442 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4443
4444 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4445 return err_mask;
4446}
9f45cbd3 4447/**
218f3d30 4448 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4449 * @dev: Device to which command will be sent
4450 * @enable: Whether to enable or disable the feature
218f3d30 4451 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4452 *
4453 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4454 * on port @ap with sector count
9f45cbd3
KCA
4455 *
4456 * LOCKING:
4457 * PCI/etc. bus probe sem.
4458 *
4459 * RETURNS:
4460 * 0 on success, AC_ERR_* mask otherwise.
4461 */
218f3d30
JG
4462static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4463 u8 feature)
9f45cbd3
KCA
4464{
4465 struct ata_taskfile tf;
4466 unsigned int err_mask;
4467
4468 /* set up set-features taskfile */
4469 DPRINTK("set features - SATA features\n");
4470
4471 ata_tf_init(dev, &tf);
4472 tf.command = ATA_CMD_SET_FEATURES;
4473 tf.feature = enable;
4474 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4475 tf.protocol = ATA_PROT_NODATA;
218f3d30 4476 tf.nsect = feature;
9f45cbd3 4477
2b789108 4478 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4479
83206a29
TH
4480 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4481 return err_mask;
1da177e4
LT
4482}
4483
8bf62ece
AL
4484/**
4485 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4486 * @dev: Device to which command will be sent
e2a7f77a
RD
4487 * @heads: Number of heads (taskfile parameter)
4488 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4489 *
4490 * LOCKING:
6aff8f1f
TH
4491 * Kernel thread context (may sleep)
4492 *
4493 * RETURNS:
4494 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4495 */
3373efd8
TH
4496static unsigned int ata_dev_init_params(struct ata_device *dev,
4497 u16 heads, u16 sectors)
8bf62ece 4498{
a0123703 4499 struct ata_taskfile tf;
6aff8f1f 4500 unsigned int err_mask;
8bf62ece
AL
4501
4502 /* Number of sectors per track 1-255. Number of heads 1-16 */
4503 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4504 return AC_ERR_INVALID;
8bf62ece
AL
4505
4506 /* set up init dev params taskfile */
4507 DPRINTK("init dev params \n");
4508
3373efd8 4509 ata_tf_init(dev, &tf);
a0123703
TH
4510 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4511 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4512 tf.protocol = ATA_PROT_NODATA;
4513 tf.nsect = sectors;
4514 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4515
2b789108 4516 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4517 /* A clean abort indicates an original or just out of spec drive
4518 and we should continue as we issue the setup based on the
4519 drive reported working geometry */
4520 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4521 err_mask = 0;
8bf62ece 4522
6aff8f1f
TH
4523 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4524 return err_mask;
8bf62ece
AL
4525}
4526
1da177e4 4527/**
0cba632b
JG
4528 * ata_sg_clean - Unmap DMA memory associated with command
4529 * @qc: Command containing DMA memory to be released
4530 *
4531 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4532 *
4533 * LOCKING:
cca3974e 4534 * spin_lock_irqsave(host lock)
1da177e4 4535 */
70e6ad0c 4536void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4537{
4538 struct ata_port *ap = qc->ap;
cedc9a47 4539 struct scatterlist *sg = qc->__sg;
1da177e4 4540 int dir = qc->dma_dir;
cedc9a47 4541 void *pad_buf = NULL;
1da177e4 4542
a4631474
TH
4543 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4544 WARN_ON(sg == NULL);
1da177e4
LT
4545
4546 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4547 WARN_ON(qc->n_elem > 1);
1da177e4 4548
2c13b7ce 4549 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4550
cedc9a47
JG
4551 /* if we padded the buffer out to 32-bit bound, and data
4552 * xfer direction is from-device, we must copy from the
4553 * pad buffer back into the supplied buffer
4554 */
4555 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4556 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4557
4558 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4559 if (qc->n_elem)
2f1f610b 4560 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47 4561 /* restore last sg */
87260216 4562 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
cedc9a47
JG
4563 if (pad_buf) {
4564 struct scatterlist *psg = &qc->pad_sgent;
45711f1a 4565 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
cedc9a47 4566 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4567 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4568 }
4569 } else {
2e242fa9 4570 if (qc->n_elem)
2f1f610b 4571 dma_unmap_single(ap->dev,
e1410f2d
JG
4572 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4573 dir);
cedc9a47
JG
4574 /* restore sg */
4575 sg->length += qc->pad_len;
4576 if (pad_buf)
4577 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4578 pad_buf, qc->pad_len);
4579 }
1da177e4
LT
4580
4581 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4582 qc->__sg = NULL;
1da177e4
LT
4583}
4584
4585/**
4586 * ata_fill_sg - Fill PCI IDE PRD table
4587 * @qc: Metadata associated with taskfile to be transferred
4588 *
780a87f7
JG
4589 * Fill PCI IDE PRD (scatter-gather) table with segments
4590 * associated with the current disk command.
4591 *
1da177e4 4592 * LOCKING:
cca3974e 4593 * spin_lock_irqsave(host lock)
1da177e4
LT
4594 *
4595 */
4596static void ata_fill_sg(struct ata_queued_cmd *qc)
4597{
1da177e4 4598 struct ata_port *ap = qc->ap;
cedc9a47
JG
4599 struct scatterlist *sg;
4600 unsigned int idx;
1da177e4 4601
a4631474 4602 WARN_ON(qc->__sg == NULL);
f131883e 4603 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4604
4605 idx = 0;
cedc9a47 4606 ata_for_each_sg(sg, qc) {
1da177e4
LT
4607 u32 addr, offset;
4608 u32 sg_len, len;
4609
4610 /* determine if physical DMA addr spans 64K boundary.
4611 * Note h/w doesn't support 64-bit, so we unconditionally
4612 * truncate dma_addr_t to u32.
4613 */
4614 addr = (u32) sg_dma_address(sg);
4615 sg_len = sg_dma_len(sg);
4616
4617 while (sg_len) {
4618 offset = addr & 0xffff;
4619 len = sg_len;
4620 if ((offset + sg_len) > 0x10000)
4621 len = 0x10000 - offset;
4622
4623 ap->prd[idx].addr = cpu_to_le32(addr);
4624 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4625 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4626
4627 idx++;
4628 sg_len -= len;
4629 addr += len;
4630 }
4631 }
4632
4633 if (idx)
4634 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4635}
b9a4197e 4636
d26fc955
AC
4637/**
4638 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4639 * @qc: Metadata associated with taskfile to be transferred
4640 *
4641 * Fill PCI IDE PRD (scatter-gather) table with segments
4642 * associated with the current disk command. Perform the fill
4643 * so that we avoid writing any length 64K records for
4644 * controllers that don't follow the spec.
4645 *
4646 * LOCKING:
4647 * spin_lock_irqsave(host lock)
4648 *
4649 */
4650static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4651{
4652 struct ata_port *ap = qc->ap;
4653 struct scatterlist *sg;
4654 unsigned int idx;
4655
4656 WARN_ON(qc->__sg == NULL);
4657 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4658
4659 idx = 0;
4660 ata_for_each_sg(sg, qc) {
4661 u32 addr, offset;
4662 u32 sg_len, len, blen;
4663
2dcb407e 4664 /* determine if physical DMA addr spans 64K boundary.
d26fc955
AC
4665 * Note h/w doesn't support 64-bit, so we unconditionally
4666 * truncate dma_addr_t to u32.
4667 */
4668 addr = (u32) sg_dma_address(sg);
4669 sg_len = sg_dma_len(sg);
4670
4671 while (sg_len) {
4672 offset = addr & 0xffff;
4673 len = sg_len;
4674 if ((offset + sg_len) > 0x10000)
4675 len = 0x10000 - offset;
4676
4677 blen = len & 0xffff;
4678 ap->prd[idx].addr = cpu_to_le32(addr);
4679 if (blen == 0) {
4680 /* Some PATA chipsets like the CS5530 can't
4681 cope with 0x0000 meaning 64K as the spec says */
4682 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4683 blen = 0x8000;
4684 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4685 }
4686 ap->prd[idx].flags_len = cpu_to_le32(blen);
4687 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4688
4689 idx++;
4690 sg_len -= len;
4691 addr += len;
4692 }
4693 }
4694
4695 if (idx)
4696 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4697}
4698
1da177e4
LT
4699/**
4700 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4701 * @qc: Metadata associated with taskfile to check
4702 *
780a87f7
JG
4703 * Allow low-level driver to filter ATA PACKET commands, returning
4704 * a status indicating whether or not it is OK to use DMA for the
4705 * supplied PACKET command.
4706 *
1da177e4 4707 * LOCKING:
cca3974e 4708 * spin_lock_irqsave(host lock)
0cba632b 4709 *
1da177e4
LT
4710 * RETURNS: 0 when ATAPI DMA can be used
4711 * nonzero otherwise
4712 */
4713int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4714{
4715 struct ata_port *ap = qc->ap;
b9a4197e
TH
4716
4717 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4718 * few ATAPI devices choke on such DMA requests.
4719 */
4720 if (unlikely(qc->nbytes & 15))
4721 return 1;
6f23a31d 4722
1da177e4 4723 if (ap->ops->check_atapi_dma)
b9a4197e 4724 return ap->ops->check_atapi_dma(qc);
1da177e4 4725
b9a4197e 4726 return 0;
1da177e4 4727}
b9a4197e 4728
31cc23b3
TH
4729/**
4730 * ata_std_qc_defer - Check whether a qc needs to be deferred
4731 * @qc: ATA command in question
4732 *
4733 * Non-NCQ commands cannot run with any other command, NCQ or
4734 * not. As upper layer only knows the queue depth, we are
4735 * responsible for maintaining exclusion. This function checks
4736 * whether a new command @qc can be issued.
4737 *
4738 * LOCKING:
4739 * spin_lock_irqsave(host lock)
4740 *
4741 * RETURNS:
4742 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4743 */
4744int ata_std_qc_defer(struct ata_queued_cmd *qc)
4745{
4746 struct ata_link *link = qc->dev->link;
4747
4748 if (qc->tf.protocol == ATA_PROT_NCQ) {
4749 if (!ata_tag_valid(link->active_tag))
4750 return 0;
4751 } else {
4752 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4753 return 0;
4754 }
4755
4756 return ATA_DEFER_LINK;
4757}
4758
1da177e4
LT
4759/**
4760 * ata_qc_prep - Prepare taskfile for submission
4761 * @qc: Metadata associated with taskfile to be prepared
4762 *
780a87f7
JG
4763 * Prepare ATA taskfile for submission.
4764 *
1da177e4 4765 * LOCKING:
cca3974e 4766 * spin_lock_irqsave(host lock)
1da177e4
LT
4767 */
4768void ata_qc_prep(struct ata_queued_cmd *qc)
4769{
4770 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4771 return;
4772
4773 ata_fill_sg(qc);
4774}
4775
d26fc955
AC
4776/**
4777 * ata_dumb_qc_prep - Prepare taskfile for submission
4778 * @qc: Metadata associated with taskfile to be prepared
4779 *
4780 * Prepare ATA taskfile for submission.
4781 *
4782 * LOCKING:
4783 * spin_lock_irqsave(host lock)
4784 */
4785void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4786{
4787 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4788 return;
4789
4790 ata_fill_sg_dumb(qc);
4791}
4792
e46834cd
BK
4793void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4794
0cba632b
JG
4795/**
4796 * ata_sg_init_one - Associate command with memory buffer
4797 * @qc: Command to be associated
4798 * @buf: Memory buffer
4799 * @buflen: Length of memory buffer, in bytes.
4800 *
4801 * Initialize the data-related elements of queued_cmd @qc
4802 * to point to a single memory buffer, @buf of byte length @buflen.
4803 *
4804 * LOCKING:
cca3974e 4805 * spin_lock_irqsave(host lock)
0cba632b
JG
4806 */
4807
1da177e4
LT
4808void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4809{
1da177e4
LT
4810 qc->flags |= ATA_QCFLAG_SINGLE;
4811
cedc9a47 4812 qc->__sg = &qc->sgent;
1da177e4 4813 qc->n_elem = 1;
cedc9a47 4814 qc->orig_n_elem = 1;
1da177e4 4815 qc->buf_virt = buf;
233277ca 4816 qc->nbytes = buflen;
87260216 4817 qc->cursg = qc->__sg;
1da177e4 4818
61c0596c 4819 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4820}
4821
0cba632b
JG
4822/**
4823 * ata_sg_init - Associate command with scatter-gather table.
4824 * @qc: Command to be associated
4825 * @sg: Scatter-gather table.
4826 * @n_elem: Number of elements in s/g table.
4827 *
4828 * Initialize the data-related elements of queued_cmd @qc
4829 * to point to a scatter-gather table @sg, containing @n_elem
4830 * elements.
4831 *
4832 * LOCKING:
cca3974e 4833 * spin_lock_irqsave(host lock)
0cba632b
JG
4834 */
4835
1da177e4
LT
4836void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4837 unsigned int n_elem)
4838{
4839 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4840 qc->__sg = sg;
1da177e4 4841 qc->n_elem = n_elem;
cedc9a47 4842 qc->orig_n_elem = n_elem;
87260216 4843 qc->cursg = qc->__sg;
1da177e4
LT
4844}
4845
4846/**
0cba632b
JG
4847 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4848 * @qc: Command with memory buffer to be mapped.
4849 *
4850 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4851 *
4852 * LOCKING:
cca3974e 4853 * spin_lock_irqsave(host lock)
1da177e4
LT
4854 *
4855 * RETURNS:
0cba632b 4856 * Zero on success, negative on error.
1da177e4
LT
4857 */
4858
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad buffer */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI commands are expected to need padding here */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, stage the unaligned tail bytes into the
		 * pad buffer so the device still receives them
		 */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		/* if the whole buffer fit in the pad slot, skip mapping */
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg so the caller sees the original length */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4914
4915/**
0cba632b
JG
4916 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4917 * @qc: Command with scatter-gather table to be mapped.
4918 *
4919 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4920 *
4921 * LOCKING:
cca3974e 4922 * spin_lock_irqsave(host lock)
1da177e4
LT
4923 *
4924 * RETURNS:
0cba632b 4925 * Zero on success, negative on error.
1da177e4
LT
4926 *
4927 */
4928
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated pad buffer */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI commands are expected to need padding here */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		sg_init_table(psg, 1);
		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
				qc->pad_len, offset_in_page(offset));

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		/* last entry wholly absorbed by the pad slot: drop it */
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	/* nothing left to DMA-map (pad buffer covers everything) */
	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg so the caller sees the original length */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	/* dma_map_sg() may coalesce entries; record the mapped count */
	qc->n_elem = n_elem;

	return 0;
}
5000
0baab86b 5001/**
c893a3ae 5002 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
5003 * @buf: Buffer to swap
5004 * @buf_words: Number of 16-bit words in buffer.
5005 *
5006 * Swap halves of 16-bit words if needed to convert from
5007 * little-endian byte order to native cpu byte order, or
5008 * vice-versa.
5009 *
5010 * LOCKING:
6f0ef4fa 5011 * Inherited from caller.
0baab86b 5012 */
1da177e4
LT
5013void swap_buf_le16(u16 *buf, unsigned int buf_words)
5014{
5015#ifdef __BIG_ENDIAN
5016 unsigned int i;
5017
5018 for (i = 0; i < buf_words; i++)
5019 buf[i] = le16_to_cpu(buf[i]);
5020#endif /* __BIG_ENDIAN */
5021}
5022
6ae4cfb5 5023/**
0d5ff566 5024 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 5025 * @adev: device to target
6ae4cfb5
AL
5026 * @buf: data buffer
5027 * @buflen: buffer length
344babaa 5028 * @write_data: read/write
6ae4cfb5
AL
5029 *
5030 * Transfer data from/to the device data register by PIO.
5031 *
5032 * LOCKING:
5033 * Inherited from caller.
6ae4cfb5 5034 */
0d5ff566
TH
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		/* stage the odd byte in a zero-padded 16-bit word so a
		 * full word can cross the data register
		 */
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			/* le16_to_cpu keeps the valid byte in the low half
			 * regardless of host endianness
			 */
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
5061
75e99585 5062/**
0d5ff566 5063 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
5064 * @adev: device to target
5065 * @buf: data buffer
5066 * @buflen: buffer length
5067 * @write_data: read/write
5068 *
88574551 5069 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
5070 * transfer with interrupts disabled.
5071 *
5072 * LOCKING:
5073 * Inherited from caller.
5074 */
0d5ff566
TH
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	/* Same PIO transfer as ata_data_xfer(), but with local
	 * interrupts masked for the duration of the transfer.
	 */
	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
5083
5084
6ae4cfb5 5085/**
5a5dbd18 5086 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
5087 * @qc: Command on going
5088 *
5a5dbd18 5089 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
5090 *
5091 * LOCKING:
5092 * Inherited from caller.
5093 */
5094
1da177e4
LT
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* this is the final sector of the command: move the HSM on */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted: advance to the next one */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5140
07f6f7d0 5141/**
5a5dbd18 5142 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5143 * @qc: Command on going
5144 *
5a5dbd18 5145 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5146 * ATA device for the DRQ request.
5147 *
5148 * LOCKING:
5149 * Inherited from caller.
5150 */
1da177e4 5151
07f6f7d0
AL
5152static void ata_pio_sectors(struct ata_queued_cmd *qc)
5153{
5154 if (is_multi_taskfile(&qc->tf)) {
5155 /* READ/WRITE MULTIPLE */
5156 unsigned int nsect;
5157
587005de 5158 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5159
5a5dbd18 5160 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5161 qc->dev->multi_count);
07f6f7d0
AL
5162 while (nsect--)
5163 ata_pio_sector(qc);
5164 } else
5165 ata_pio_sector(qc);
4cc980b3
AL
5166
5167 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5168}
5169
c71c1857
AL
5170/**
5171 * atapi_send_cdb - Write CDB bytes to hardware
5172 * @ap: Port to which ATAPI device is attached.
5173 * @qc: Taskfile currently active
5174 *
5175 * When device has indicated its readiness to accept
5176 * a CDB, this function is called. Send the CDB.
5177 *
5178 * LOCKING:
5179 * caller.
5180 */
5181
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	/* ATAPI CDBs are at least 12 bytes */
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* next HSM state depends on how the payload will move */
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase: wait for final status */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5205
6ae4cfb5
AL
5206/**
5207 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5208 * @qc: Command on going
5209 * @bytes: number of bytes
5210 *
5211 * Transfer Transfer data from/to the ATAPI device.
5212 *
5213 * LOCKING:
5214 * Inherited from caller.
5215 *
5216 */
5217
1da177e4
LT
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;
	int no_more_sg = 0;

	/* this chunk satisfies the command: move the HSM on */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(no_more_sg)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = qc->cursg;

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* advance the sg cursor; remember when the table runs out so
	 * any residue is drained/padded on the next iteration
	 */
	if (qc->cursg_ofs == sg->length) {
		if (qc->cursg == lsg)
			no_more_sg = 1;

		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
5305
6ae4cfb5
AL
5306/**
5307 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5308 * @qc: Command on going
5309 *
5310 * Transfer Transfer data from/to the ATAPI device.
5311 *
5312 * LOCKING:
5313 * Inherited from caller.
6ae4cfb5
AL
5314 */
5315
1da177e4
LT
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* device-reported byte count for this DRQ block */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	/* bogus ireason: flag HSM violation and let EH recover */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5356
5357/**
c234fb00
AL
5358 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5359 * @ap: the target ata_port
5360 * @qc: qc on going
1da177e4 5361 *
c234fb00
AL
5362 * RETURNS:
5363 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5364 */
c234fb00
AL
5365
5366static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5367{
c234fb00
AL
5368 if (qc->tf.flags & ATA_TFLAG_POLLING)
5369 return 1;
1da177e4 5370
c234fb00
AL
5371 if (ap->hsm_task_state == HSM_ST_FIRST) {
5372 if (qc->tf.protocol == ATA_PROT_PIO &&
5373 (qc->tf.flags & ATA_TFLAG_WRITE))
5374 return 1;
1da177e4 5375
c234fb00
AL
5376 if (is_atapi_taskfile(&qc->tf) &&
5377 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5378 return 1;
fe79e683
AL
5379 }
5380
c234fb00
AL
5381 return 0;
5382}
1da177e4 5383
c17ea20d
TH
5384/**
5385 * ata_hsm_qc_complete - finish a qc running on standard HSM
5386 * @qc: Command to complete
5387 * @in_wq: 1 if called from workqueue, 0 otherwise
5388 *
5389 * Finish @qc which is running on standard HSM.
5390 *
5391 * LOCKING:
cca3974e 5392 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
5393 * Otherwise, none on entry and grabs host lock.
5394 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable interrupts before completion */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation: freeze the port for EH */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* interrupt context: host lock already held */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old EH: no freeze support, always complete */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5433
bb5cb290
AL
5434/**
5435 * ata_hsm_move - move the HSM to the next state.
5436 * @ap: the target ata_port
5437 * @qc: qc on going
5438 * @status: current device status
5439 * @in_wq: 1 if called from workqueue, 0 otherwise
5440 *
5441 * RETURNS:
5442 * 1 when poll next status needed, 0 otherwise.
5443 */
9a1004d0
TH
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5667
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: reschedule ourselves after a pause */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5705
1da177e4
LT
5706/**
5707 * ata_qc_new - Request an available ATA command, for queueing
5708 * @ap: Port associated with device @dev
5709 * @dev: Device from whom we request an available command structure
5710 *
5711 * LOCKING:
0cba632b 5712 * None.
1da177e4
LT
5713 */
5714
5715static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5716{
5717 struct ata_queued_cmd *qc = NULL;
5718 unsigned int i;
5719
e3180499 5720 /* no command while frozen */
b51e9e5d 5721 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5722 return NULL;
5723
2ab7db1f
TH
5724 /* the last tag is reserved for internal command. */
5725 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5726 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5727 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5728 break;
5729 }
5730
5731 if (qc)
5732 qc->tag = i;
5733
5734 return qc;
5735}
5736
5737/**
5738 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5739 * @dev: Device from whom we request an available command structure
5740 *
5741 * LOCKING:
0cba632b 5742 * None.
1da177e4
LT
5743 */
5744
3373efd8 5745struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5746{
9af5c9c9 5747 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5748 struct ata_queued_cmd *qc;
5749
5750 qc = ata_qc_new(ap);
5751 if (qc) {
1da177e4
LT
5752 qc->scsicmd = NULL;
5753 qc->ap = ap;
5754 qc->dev = dev;
1da177e4 5755
2c13b7ce 5756 ata_qc_reinit(qc);
1da177e4
LT
5757 }
5758
5759 return qc;
5760}
5761
1da177e4
LT
5762/**
5763 * ata_qc_free - free unused ata_queued_cmd
5764 * @qc: Command to complete
5765 *
5766 * Designed to free unused ata_queued_cmd object
5767 * in case something prevents using it.
5768 *
5769 * LOCKING:
cca3974e 5770 * spin_lock_irqsave(host lock)
1da177e4
LT
5771 */
5772void ata_qc_free(struct ata_queued_cmd *qc)
5773{
4ba946e9
TH
5774 struct ata_port *ap = qc->ap;
5775 unsigned int tag;
5776
a4631474 5777 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5778
4ba946e9
TH
5779 qc->flags = 0;
5780 tag = qc->tag;
5781 if (likely(ata_tag_valid(tag))) {
4ba946e9 5782 qc->tag = ATA_TAG_POISON;
6cec4a39 5783 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5784 }
1da177e4
LT
5785}
5786
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ: clear this tag from the link's sactive mask */
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		/* non-NCQ: only one active tag per link */
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5823
39599a53
TH
5824static void fill_result_tf(struct ata_queued_cmd *qc)
5825{
5826 struct ata_port *ap = qc->ap;
5827
39599a53 5828 qc->result_tf.flags = qc->tf.flags;
4742d54f 5829 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5830}
5831
f686bcb8
TH
5832/**
5833 * ata_qc_complete - Complete an active ATA command
5834 * @qc: Command to complete
5835 * @err_mask: ATA Status register contents
5836 *
5837 * Indicate to the mid and upper layers that an ATA
5838 * command has completed, with either an ok or not-ok status.
5839 *
5840 * LOCKING:
cca3974e 5841 * spin_lock_irqsave(host lock)
f686bcb8
TH
5842 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal-tag failures fall through to normal
			 * completion; everything else goes to EH
			 */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5915
dedaf2b0
TH
5916/**
5917 * ata_qc_complete_multiple - Complete multiple qcs successfully
5918 * @ap: port in question
5919 * @qc_active: new qc_active mask
5920 * @finish_qc: LLDD callback invoked before completing a qc
5921 *
5922 * Complete in-flight commands. This functions is meant to be
5923 * called from low-level driver's interrupt routine to complete
5924 * requests normally. ap->qc_active and @qc_active is compared
5925 * and commands are completed accordingly.
5926 *
5927 * LOCKING:
cca3974e 5928 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5929 *
5930 * RETURNS:
5931 * Number of completed commands on success, -errno otherwise.
5932 */
5933int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5934 void (*finish_qc)(struct ata_queued_cmd *))
5935{
5936 int nr_done = 0;
5937 u32 done_mask;
5938 int i;
5939
5940 done_mask = ap->qc_active ^ qc_active;
5941
5942 if (unlikely(done_mask & qc_active)) {
5943 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5944 "(%08x->%08x)\n", ap->qc_active, qc_active);
5945 return -EINVAL;
5946 }
5947
5948 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5949 struct ata_queued_cmd *qc;
5950
5951 if (!(done_mask & (1 << i)))
5952 continue;
5953
5954 if ((qc = ata_qc_from_tag(ap, i))) {
5955 if (finish_qc)
5956 finish_qc(qc);
5957 ata_qc_complete(qc);
5958 nr_done++;
5959 }
5960 }
5961
5962 return nr_done;
5963}
5964
1da177e4
LT
5965static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5966{
5967 struct ata_port *ap = qc->ap;
5968
5969 switch (qc->tf.protocol) {
3dc1d881 5970 case ATA_PROT_NCQ:
1da177e4
LT
5971 case ATA_PROT_DMA:
5972 case ATA_PROT_ATAPI_DMA:
5973 return 1;
5974
5975 case ATA_PROT_ATAPI:
5976 case ATA_PROT_PIO:
1da177e4
LT
5977 if (ap->flags & ATA_FLAG_PIO_DMA)
5978 return 1;
5979
5980 /* fall through */
5981
5982 default:
5983 return 0;
5984 }
5985
5986 /* never reached */
5987}
5988
5989/**
5990 * ata_qc_issue - issue taskfile to device
5991 * @qc: command to issue to device
5992 *
5993 * Prepare an ATA command to submission to device.
5994 * This includes mapping the data into a DMA-able
5995 * area, filling in the S/G table, and finally
5996 * writing the taskfile to hardware, starting the command.
5997 *
5998 * LOCKING:
cca3974e 5999 * spin_lock_irqsave(host lock)
1da177e4 6000 */
8e0e694a 6001void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
6002{
6003 struct ata_port *ap = qc->ap;
9af5c9c9 6004 struct ata_link *link = qc->dev->link;
1da177e4 6005
dedaf2b0
TH
6006 /* Make sure only one non-NCQ command is outstanding. The
6007 * check is skipped for old EH because it reuses active qc to
6008 * request ATAPI sense.
6009 */
9af5c9c9 6010 WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));
dedaf2b0
TH
6011
6012 if (qc->tf.protocol == ATA_PROT_NCQ) {
9af5c9c9 6013 WARN_ON(link->sactive & (1 << qc->tag));
da917d69
TH
6014
6015 if (!link->sactive)
6016 ap->nr_active_links++;
9af5c9c9 6017 link->sactive |= 1 << qc->tag;
dedaf2b0 6018 } else {
9af5c9c9 6019 WARN_ON(link->sactive);
da917d69
TH
6020
6021 ap->nr_active_links++;
9af5c9c9 6022 link->active_tag = qc->tag;
dedaf2b0
TH
6023 }
6024
e4a70e76 6025 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 6026 ap->qc_active |= 1 << qc->tag;
e4a70e76 6027
1da177e4
LT
6028 if (ata_should_dma_map(qc)) {
6029 if (qc->flags & ATA_QCFLAG_SG) {
6030 if (ata_sg_setup(qc))
8e436af9 6031 goto sg_err;
1da177e4
LT
6032 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
6033 if (ata_sg_setup_one(qc))
8e436af9 6034 goto sg_err;
1da177e4
LT
6035 }
6036 } else {
6037 qc->flags &= ~ATA_QCFLAG_DMAMAP;
6038 }
6039
054a5fba
TH
6040 /* if device is sleeping, schedule softreset and abort the link */
6041 if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
6042 link->eh_info.action |= ATA_EH_SOFTRESET;
6043 ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
6044 ata_link_abort(link);
6045 return;
6046 }
6047
1da177e4
LT
6048 ap->ops->qc_prep(qc);
6049
8e0e694a
TH
6050 qc->err_mask |= ap->ops->qc_issue(qc);
6051 if (unlikely(qc->err_mask))
6052 goto err;
6053 return;
1da177e4 6054
8e436af9
TH
6055sg_err:
6056 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
6057 qc->err_mask |= AC_ERR_SYSTEM;
6058err:
6059 ata_qc_complete(qc);
1da177e4
LT
6060}
6061
6062/**
6063 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
6064 * @qc: command to issue to device
6065 *
6066 * Using various libata functions and hooks, this function
6067 * starts an ATA command. ATA commands are grouped into
6068 * classes called "protocols", and issuing each type of protocol
6069 * is slightly different.
6070 *
0baab86b
EF
6071 * May be used as the qc_issue() entry in ata_port_operations.
6072 *
1da177e4 6073 * LOCKING:
cca3974e 6074 * spin_lock_irqsave(host lock)
1da177e4
LT
6075 *
6076 * RETURNS:
9a3d9eb0 6077 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
6078 */
6079
9a3d9eb0 6080unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
6081{
6082 struct ata_port *ap = qc->ap;
6083
e50362ec
AL
6084 /* Use polling pio if the LLD doesn't handle
6085 * interrupt driven pio and atapi CDB interrupt.
6086 */
6087 if (ap->flags & ATA_FLAG_PIO_POLLING) {
6088 switch (qc->tf.protocol) {
6089 case ATA_PROT_PIO:
e3472cbe 6090 case ATA_PROT_NODATA:
e50362ec
AL
6091 case ATA_PROT_ATAPI:
6092 case ATA_PROT_ATAPI_NODATA:
6093 qc->tf.flags |= ATA_TFLAG_POLLING;
6094 break;
6095 case ATA_PROT_ATAPI_DMA:
6096 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 6097 /* see ata_dma_blacklisted() */
e50362ec
AL
6098 BUG();
6099 break;
6100 default:
6101 break;
6102 }
6103 }
6104
312f7da2 6105 /* select the device */
1da177e4
LT
6106 ata_dev_select(ap, qc->dev->devno, 1, 0);
6107
312f7da2 6108 /* start the command */
1da177e4
LT
6109 switch (qc->tf.protocol) {
6110 case ATA_PROT_NODATA:
312f7da2
AL
6111 if (qc->tf.flags & ATA_TFLAG_POLLING)
6112 ata_qc_set_polling(qc);
6113
e5338254 6114 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
6115 ap->hsm_task_state = HSM_ST_LAST;
6116
6117 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 6118 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 6119
1da177e4
LT
6120 break;
6121
6122 case ATA_PROT_DMA:
587005de 6123 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6124
1da177e4
LT
6125 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6126 ap->ops->bmdma_setup(qc); /* set up bmdma */
6127 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 6128 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
6129 break;
6130
312f7da2
AL
6131 case ATA_PROT_PIO:
6132 if (qc->tf.flags & ATA_TFLAG_POLLING)
6133 ata_qc_set_polling(qc);
1da177e4 6134
e5338254 6135 ata_tf_to_host(ap, &qc->tf);
312f7da2 6136
54f00389
AL
6137 if (qc->tf.flags & ATA_TFLAG_WRITE) {
6138 /* PIO data out protocol */
6139 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 6140 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
6141
6142 /* always send first data block using
e27486db 6143 * the ata_pio_task() codepath.
54f00389 6144 */
312f7da2 6145 } else {
54f00389
AL
6146 /* PIO data in protocol */
6147 ap->hsm_task_state = HSM_ST;
6148
6149 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 6150 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
6151
6152 /* if polling, ata_pio_task() handles the rest.
6153 * otherwise, interrupt handler takes over from here.
6154 */
312f7da2
AL
6155 }
6156
1da177e4
LT
6157 break;
6158
1da177e4 6159 case ATA_PROT_ATAPI:
1da177e4 6160 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
6161 if (qc->tf.flags & ATA_TFLAG_POLLING)
6162 ata_qc_set_polling(qc);
6163
e5338254 6164 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 6165
312f7da2
AL
6166 ap->hsm_task_state = HSM_ST_FIRST;
6167
6168 /* send cdb by polling if no cdb interrupt */
6169 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
6170 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 6171 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
6172 break;
6173
6174 case ATA_PROT_ATAPI_DMA:
587005de 6175 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 6176
1da177e4
LT
6177 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
6178 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
6179 ap->hsm_task_state = HSM_ST_FIRST;
6180
6181 /* send cdb by polling if no cdb interrupt */
6182 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 6183 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
6184 break;
6185
6186 default:
6187 WARN_ON(1);
9a3d9eb0 6188 return AC_ERR_SYSTEM;
1da177e4
LT
6189 }
6190
6191 return 0;
6192}
6193
1da177e4
LT
6194/**
6195 * ata_host_intr - Handle host interrupt for given (port, task)
6196 * @ap: Port on which interrupt arrived (possibly...)
6197 * @qc: Taskfile currently active in engine
6198 *
6199 * Handle host interrupt for given queued command. Currently,
6200 * only DMA interrupts are handled. All other commands are
6201 * handled via polling with interrupts disabled (nIEN bit).
6202 *
6203 * LOCKING:
cca3974e 6204 * spin_lock_irqsave(host lock)
1da177e4
LT
6205 *
6206 * RETURNS:
6207 * One if interrupt was handled, zero if not (shared irq).
6208 */
6209
2dcb407e
JG
6210inline unsigned int ata_host_intr(struct ata_port *ap,
6211 struct ata_queued_cmd *qc)
1da177e4 6212{
9af5c9c9 6213 struct ata_eh_info *ehi = &ap->link.eh_info;
312f7da2 6214 u8 status, host_stat = 0;
1da177e4 6215
312f7da2 6216 VPRINTK("ata%u: protocol %d task_state %d\n",
44877b4e 6217 ap->print_id, qc->tf.protocol, ap->hsm_task_state);
1da177e4 6218
312f7da2
AL
6219 /* Check whether we are expecting interrupt in this state */
6220 switch (ap->hsm_task_state) {
6221 case HSM_ST_FIRST:
6912ccd5
AL
6222 /* Some pre-ATAPI-4 devices assert INTRQ
6223 * at this state when ready to receive CDB.
6224 */
1da177e4 6225
312f7da2
AL
6226 /* Check the ATA_DFLAG_CDB_INTR flag is enough here.
6227 * The flag was turned on only for atapi devices.
6228 * No need to check is_atapi_taskfile(&qc->tf) again.
6229 */
6230 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
1da177e4 6231 goto idle_irq;
1da177e4 6232 break;
312f7da2
AL
6233 case HSM_ST_LAST:
6234 if (qc->tf.protocol == ATA_PROT_DMA ||
6235 qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
6236 /* check status of DMA engine */
6237 host_stat = ap->ops->bmdma_status(ap);
44877b4e
TH
6238 VPRINTK("ata%u: host_stat 0x%X\n",
6239 ap->print_id, host_stat);
312f7da2
AL
6240
6241 /* if it's not our irq... */
6242 if (!(host_stat & ATA_DMA_INTR))
6243 goto idle_irq;
6244
6245 /* before we do anything else, clear DMA-Start bit */
6246 ap->ops->bmdma_stop(qc);
a4f16610
AL
6247
6248 if (unlikely(host_stat & ATA_DMA_ERR)) {
6249 /* error when transfering data to/from memory */
6250 qc->err_mask |= AC_ERR_HOST_BUS;
6251 ap->hsm_task_state = HSM_ST_ERR;
6252 }
312f7da2
AL
6253 }
6254 break;
6255 case HSM_ST:
6256 break;
1da177e4
LT
6257 default:
6258 goto idle_irq;
6259 }
6260
312f7da2
AL
6261 /* check altstatus */
6262 status = ata_altstatus(ap);
6263 if (status & ATA_BUSY)
6264 goto idle_irq;
1da177e4 6265
312f7da2
AL
6266 /* check main status, clearing INTRQ */
6267 status = ata_chk_status(ap);
6268 if (unlikely(status & ATA_BUSY))
6269 goto idle_irq;
1da177e4 6270
312f7da2
AL
6271 /* ack bmdma irq events */
6272 ap->ops->irq_clear(ap);
1da177e4 6273
bb5cb290 6274 ata_hsm_move(ap, qc, status, 0);
ea54763f
TH
6275
6276 if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
6277 qc->tf.protocol == ATA_PROT_ATAPI_DMA))
6278 ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);
6279
1da177e4
LT
6280 return 1; /* irq handled */
6281
6282idle_irq:
6283 ap->stats.idle_irq++;
6284
6285#ifdef ATA_IRQ_TRAP
6286 if ((ap->stats.idle_irq % 1000) == 0) {
6d32d30f
JG
6287 ata_chk_status(ap);
6288 ap->ops->irq_clear(ap);
f15a1daf 6289 ata_port_printk(ap, KERN_WARNING, "irq trap\n");
23cfce89 6290 return 1;
1da177e4
LT
6291 }
6292#endif
6293 return 0; /* irq not handled */
6294}
6295
6296/**
6297 * ata_interrupt - Default ATA host interrupt handler
0cba632b 6298 * @irq: irq line (unused)
cca3974e 6299 * @dev_instance: pointer to our ata_host information structure
1da177e4 6300 *
0cba632b
JG
6301 * Default interrupt handler for PCI IDE devices. Calls
6302 * ata_host_intr() for each port that is not disabled.
6303 *
1da177e4 6304 * LOCKING:
cca3974e 6305 * Obtains host lock during operation.
1da177e4
LT
6306 *
6307 * RETURNS:
0cba632b 6308 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
6309 */
6310
2dcb407e 6311irqreturn_t ata_interrupt(int irq, void *dev_instance)
1da177e4 6312{
cca3974e 6313 struct ata_host *host = dev_instance;
1da177e4
LT
6314 unsigned int i;
6315 unsigned int handled = 0;
6316 unsigned long flags;
6317
6318 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 6319 spin_lock_irqsave(&host->lock, flags);
1da177e4 6320
cca3974e 6321 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
6322 struct ata_port *ap;
6323
cca3974e 6324 ap = host->ports[i];
c1389503 6325 if (ap &&
029f5468 6326 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
6327 struct ata_queued_cmd *qc;
6328
9af5c9c9 6329 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 6330 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 6331 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
6332 handled |= ata_host_intr(ap, qc);
6333 }
6334 }
6335
cca3974e 6336 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
6337
6338 return IRQ_RETVAL(handled);
6339}
6340
34bf2170
TH
6341/**
6342 * sata_scr_valid - test whether SCRs are accessible
936fd732 6343 * @link: ATA link to test SCR accessibility for
34bf2170 6344 *
936fd732 6345 * Test whether SCRs are accessible for @link.
34bf2170
TH
6346 *
6347 * LOCKING:
6348 * None.
6349 *
6350 * RETURNS:
6351 * 1 if SCRs are accessible, 0 otherwise.
6352 */
936fd732 6353int sata_scr_valid(struct ata_link *link)
34bf2170 6354{
936fd732
TH
6355 struct ata_port *ap = link->ap;
6356
a16abc0b 6357 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6358}
6359
6360/**
6361 * sata_scr_read - read SCR register of the specified port
936fd732 6362 * @link: ATA link to read SCR for
34bf2170
TH
6363 * @reg: SCR to read
6364 * @val: Place to store read value
6365 *
936fd732 6366 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6367 * guaranteed to succeed if @link is ap->link, the cable type of
6368 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6369 *
6370 * LOCKING:
633273a3 6371 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6372 *
6373 * RETURNS:
6374 * 0 on success, negative errno on failure.
6375 */
936fd732 6376int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6377{
633273a3
TH
6378 if (ata_is_host_link(link)) {
6379 struct ata_port *ap = link->ap;
936fd732 6380
633273a3
TH
6381 if (sata_scr_valid(link))
6382 return ap->ops->scr_read(ap, reg, val);
6383 return -EOPNOTSUPP;
6384 }
6385
6386 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6387}
6388
6389/**
6390 * sata_scr_write - write SCR register of the specified port
936fd732 6391 * @link: ATA link to write SCR for
34bf2170
TH
6392 * @reg: SCR to write
6393 * @val: value to write
6394 *
936fd732 6395 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6396 * guaranteed to succeed if @link is ap->link, the cable type of
6397 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6398 *
6399 * LOCKING:
633273a3 6400 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6401 *
6402 * RETURNS:
6403 * 0 on success, negative errno on failure.
6404 */
936fd732 6405int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6406{
633273a3
TH
6407 if (ata_is_host_link(link)) {
6408 struct ata_port *ap = link->ap;
6409
6410 if (sata_scr_valid(link))
6411 return ap->ops->scr_write(ap, reg, val);
6412 return -EOPNOTSUPP;
6413 }
936fd732 6414
633273a3 6415 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6416}
6417
6418/**
6419 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6420 * @link: ATA link to write SCR for
34bf2170
TH
6421 * @reg: SCR to write
6422 * @val: value to write
6423 *
6424 * This function is identical to sata_scr_write() except that this
6425 * function performs flush after writing to the register.
6426 *
6427 * LOCKING:
633273a3 6428 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6429 *
6430 * RETURNS:
6431 * 0 on success, negative errno on failure.
6432 */
936fd732 6433int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6434{
633273a3
TH
6435 if (ata_is_host_link(link)) {
6436 struct ata_port *ap = link->ap;
6437 int rc;
da3dbb17 6438
633273a3
TH
6439 if (sata_scr_valid(link)) {
6440 rc = ap->ops->scr_write(ap, reg, val);
6441 if (rc == 0)
6442 rc = ap->ops->scr_read(ap, reg, &val);
6443 return rc;
6444 }
6445 return -EOPNOTSUPP;
34bf2170 6446 }
633273a3
TH
6447
6448 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6449}
6450
6451/**
936fd732
TH
6452 * ata_link_online - test whether the given link is online
6453 * @link: ATA link to test
34bf2170 6454 *
936fd732
TH
6455 * Test whether @link is online. Note that this function returns
6456 * 0 if online status of @link cannot be obtained, so
6457 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6458 *
6459 * LOCKING:
6460 * None.
6461 *
6462 * RETURNS:
6463 * 1 if the port online status is available and online.
6464 */
936fd732 6465int ata_link_online(struct ata_link *link)
34bf2170
TH
6466{
6467 u32 sstatus;
6468
936fd732
TH
6469 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6470 (sstatus & 0xf) == 0x3)
34bf2170
TH
6471 return 1;
6472 return 0;
6473}
6474
6475/**
936fd732
TH
6476 * ata_link_offline - test whether the given link is offline
6477 * @link: ATA link to test
34bf2170 6478 *
936fd732
TH
6479 * Test whether @link is offline. Note that this function
6480 * returns 0 if offline status of @link cannot be obtained, so
6481 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6482 *
6483 * LOCKING:
6484 * None.
6485 *
6486 * RETURNS:
6487 * 1 if the port offline status is available and offline.
6488 */
936fd732 6489int ata_link_offline(struct ata_link *link)
34bf2170
TH
6490{
6491 u32 sstatus;
6492
936fd732
TH
6493 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6494 (sstatus & 0xf) != 0x3)
34bf2170
TH
6495 return 1;
6496 return 0;
6497}
0baab86b 6498
77b08fb5 6499int ata_flush_cache(struct ata_device *dev)
9b847548 6500{
977e6b9f 6501 unsigned int err_mask;
9b847548
JA
6502 u8 cmd;
6503
6504 if (!ata_try_flush_cache(dev))
6505 return 0;
6506
6fc49adb 6507 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6508 cmd = ATA_CMD_FLUSH_EXT;
6509 else
6510 cmd = ATA_CMD_FLUSH;
6511
4f34337b
AC
6512 /* This is wrong. On a failed flush we get back the LBA of the lost
6513 sector and we should (assuming it wasn't aborted as unknown) issue
2dcb407e 6514 a further flush command to continue the writeback until it
4f34337b 6515 does not error */
977e6b9f
TH
6516 err_mask = ata_do_simple_cmd(dev, cmd);
6517 if (err_mask) {
6518 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6519 return -EIO;
6520 }
6521
6522 return 0;
9b847548
JA
6523}
6524
6ffa01d8 6525#ifdef CONFIG_PM
cca3974e
JG
6526static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6527 unsigned int action, unsigned int ehi_flags,
6528 int wait)
500530f6
TH
6529{
6530 unsigned long flags;
6531 int i, rc;
6532
cca3974e
JG
6533 for (i = 0; i < host->n_ports; i++) {
6534 struct ata_port *ap = host->ports[i];
e3667ebf 6535 struct ata_link *link;
500530f6
TH
6536
6537 /* Previous resume operation might still be in
6538 * progress. Wait for PM_PENDING to clear.
6539 */
6540 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6541 ata_port_wait_eh(ap);
6542 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6543 }
6544
6545 /* request PM ops to EH */
6546 spin_lock_irqsave(ap->lock, flags);
6547
6548 ap->pm_mesg = mesg;
6549 if (wait) {
6550 rc = 0;
6551 ap->pm_result = &rc;
6552 }
6553
6554 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6555 __ata_port_for_each_link(link, ap) {
6556 link->eh_info.action |= action;
6557 link->eh_info.flags |= ehi_flags;
6558 }
500530f6
TH
6559
6560 ata_port_schedule_eh(ap);
6561
6562 spin_unlock_irqrestore(ap->lock, flags);
6563
6564 /* wait and check result */
6565 if (wait) {
6566 ata_port_wait_eh(ap);
6567 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6568 if (rc)
6569 return rc;
6570 }
6571 }
6572
6573 return 0;
6574}
6575
6576/**
cca3974e
JG
6577 * ata_host_suspend - suspend host
6578 * @host: host to suspend
500530f6
TH
6579 * @mesg: PM message
6580 *
cca3974e 6581 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6582 * function requests EH to perform PM operations and waits for EH
6583 * to finish.
6584 *
6585 * LOCKING:
6586 * Kernel thread context (may sleep).
6587 *
6588 * RETURNS:
6589 * 0 on success, -errno on failure.
6590 */
cca3974e 6591int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6592{
9666f400 6593 int rc;
500530f6 6594
ca77329f
KCA
6595 /*
6596 * disable link pm on all ports before requesting
6597 * any pm activity
6598 */
6599 ata_lpm_enable(host);
6600
cca3974e 6601 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6602 if (rc == 0)
6603 host->dev->power.power_state = mesg;
500530f6
TH
6604 return rc;
6605}
6606
6607/**
cca3974e
JG
6608 * ata_host_resume - resume host
6609 * @host: host to resume
500530f6 6610 *
cca3974e 6611 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6612 * function requests EH to perform PM operations and returns.
6613 * Note that all resume operations are performed parallely.
6614 *
6615 * LOCKING:
6616 * Kernel thread context (may sleep).
6617 */
cca3974e 6618void ata_host_resume(struct ata_host *host)
500530f6 6619{
cca3974e
JG
6620 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6621 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6622 host->dev->power.power_state = PMSG_ON;
ca77329f
KCA
6623
6624 /* reenable link pm */
6625 ata_lpm_disable(host);
500530f6 6626}
6ffa01d8 6627#endif
500530f6 6628
c893a3ae
RD
6629/**
6630 * ata_port_start - Set port up for dma.
6631 * @ap: Port to initialize
6632 *
6633 * Called just after data structures for each port are
6634 * initialized. Allocates space for PRD table.
6635 *
6636 * May be used as the port_start() entry in ata_port_operations.
6637 *
6638 * LOCKING:
6639 * Inherited from caller.
6640 */
f0d36efd 6641int ata_port_start(struct ata_port *ap)
1da177e4 6642{
2f1f610b 6643 struct device *dev = ap->dev;
6037d6bb 6644 int rc;
1da177e4 6645
f0d36efd
TH
6646 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6647 GFP_KERNEL);
1da177e4
LT
6648 if (!ap->prd)
6649 return -ENOMEM;
6650
6037d6bb 6651 rc = ata_pad_alloc(ap, dev);
f0d36efd 6652 if (rc)
6037d6bb 6653 return rc;
1da177e4 6654
f0d36efd
TH
6655 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6656 (unsigned long long)ap->prd_dma);
1da177e4
LT
6657 return 0;
6658}
6659
3ef3b43d
TH
6660/**
6661 * ata_dev_init - Initialize an ata_device structure
6662 * @dev: Device structure to initialize
6663 *
6664 * Initialize @dev in preparation for probing.
6665 *
6666 * LOCKING:
6667 * Inherited from caller.
6668 */
6669void ata_dev_init(struct ata_device *dev)
6670{
9af5c9c9
TH
6671 struct ata_link *link = dev->link;
6672 struct ata_port *ap = link->ap;
72fa4b74
TH
6673 unsigned long flags;
6674
5a04bf4b 6675 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6676 link->sata_spd_limit = link->hw_sata_spd_limit;
6677 link->sata_spd = 0;
5a04bf4b 6678
72fa4b74
TH
6679 /* High bits of dev->flags are used to record warm plug
6680 * requests which occur asynchronously. Synchronize using
cca3974e 6681 * host lock.
72fa4b74 6682 */
ba6a1308 6683 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6684 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6685 dev->horkage = 0;
ba6a1308 6686 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6687
72fa4b74
TH
6688 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6689 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6690 dev->pio_mask = UINT_MAX;
6691 dev->mwdma_mask = UINT_MAX;
6692 dev->udma_mask = UINT_MAX;
6693}
6694
4fb37a25
TH
6695/**
6696 * ata_link_init - Initialize an ata_link structure
6697 * @ap: ATA port link is attached to
6698 * @link: Link structure to initialize
8989805d 6699 * @pmp: Port multiplier port number
4fb37a25
TH
6700 *
6701 * Initialize @link.
6702 *
6703 * LOCKING:
6704 * Kernel thread context (may sleep)
6705 */
fb7fd614 6706void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6707{
6708 int i;
6709
6710 /* clear everything except for devices */
6711 memset(link, 0, offsetof(struct ata_link, device[0]));
6712
6713 link->ap = ap;
8989805d 6714 link->pmp = pmp;
4fb37a25
TH
6715 link->active_tag = ATA_TAG_POISON;
6716 link->hw_sata_spd_limit = UINT_MAX;
6717
6718 /* can't use iterator, ap isn't initialized yet */
6719 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6720 struct ata_device *dev = &link->device[i];
6721
6722 dev->link = link;
6723 dev->devno = dev - link->device;
6724 ata_dev_init(dev);
6725 }
6726}
6727
6728/**
6729 * sata_link_init_spd - Initialize link->sata_spd_limit
6730 * @link: Link to configure sata_spd_limit for
6731 *
6732 * Initialize @link->[hw_]sata_spd_limit to the currently
6733 * configured value.
6734 *
6735 * LOCKING:
6736 * Kernel thread context (may sleep).
6737 *
6738 * RETURNS:
6739 * 0 on success, -errno on failure.
6740 */
fb7fd614 6741int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6742{
6743 u32 scontrol, spd;
6744 int rc;
6745
6746 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6747 if (rc)
6748 return rc;
6749
6750 spd = (scontrol >> 4) & 0xf;
6751 if (spd)
6752 link->hw_sata_spd_limit &= (1 << spd) - 1;
6753
6754 link->sata_spd_limit = link->hw_sata_spd_limit;
6755
6756 return 0;
6757}
6758
1da177e4 6759/**
f3187195
TH
6760 * ata_port_alloc - allocate and initialize basic ATA port resources
6761 * @host: ATA host this allocated port belongs to
1da177e4 6762 *
f3187195
TH
6763 * Allocate and initialize basic ATA port resources.
6764 *
6765 * RETURNS:
6766 * Allocate ATA port on success, NULL on failure.
0cba632b 6767 *
1da177e4 6768 * LOCKING:
f3187195 6769 * Inherited from calling layer (may sleep).
1da177e4 6770 */
f3187195 6771struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6772{
f3187195 6773 struct ata_port *ap;
1da177e4 6774
f3187195
TH
6775 DPRINTK("ENTER\n");
6776
6777 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6778 if (!ap)
6779 return NULL;
6780
f4d6d004 6781 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6782 ap->lock = &host->lock;
198e0fed 6783 ap->flags = ATA_FLAG_DISABLED;
f3187195 6784 ap->print_id = -1;
1da177e4 6785 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6786 ap->host = host;
f3187195 6787 ap->dev = host->dev;
1da177e4 6788 ap->last_ctl = 0xFF;
bd5d825c
BP
6789
6790#if defined(ATA_VERBOSE_DEBUG)
6791 /* turn on all debugging levels */
6792 ap->msg_enable = 0x00FF;
6793#elif defined(ATA_DEBUG)
6794 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6795#else
0dd4b21f 6796 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6797#endif
1da177e4 6798
65f27f38
DH
6799 INIT_DELAYED_WORK(&ap->port_task, NULL);
6800 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6801 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6802 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6803 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6804 init_timer_deferrable(&ap->fastdrain_timer);
6805 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6806 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6807
838df628 6808 ap->cbl = ATA_CBL_NONE;
838df628 6809
8989805d 6810 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6811
6812#ifdef ATA_IRQ_TRAP
6813 ap->stats.unhandled_irq = 1;
6814 ap->stats.idle_irq = 1;
6815#endif
1da177e4 6816 return ap;
1da177e4
LT
6817}
6818
f0d36efd
TH
6819static void ata_host_release(struct device *gendev, void *res)
6820{
6821 struct ata_host *host = dev_get_drvdata(gendev);
6822 int i;
6823
6824 for (i = 0; i < host->n_ports; i++) {
6825 struct ata_port *ap = host->ports[i];
6826
ecef7253
TH
6827 if (!ap)
6828 continue;
6829
6830 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
f0d36efd 6831 ap->ops->port_stop(ap);
f0d36efd
TH
6832 }
6833
ecef7253 6834 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
f0d36efd 6835 host->ops->host_stop(host);
1aa56cca 6836
1aa506e4
TH
6837 for (i = 0; i < host->n_ports; i++) {
6838 struct ata_port *ap = host->ports[i];
6839
4911487a
TH
6840 if (!ap)
6841 continue;
6842
6843 if (ap->scsi_host)
1aa506e4
TH
6844 scsi_host_put(ap->scsi_host);
6845
633273a3 6846 kfree(ap->pmp_link);
4911487a 6847 kfree(ap);
1aa506e4
TH
6848 host->ports[i] = NULL;
6849 }
6850
1aa56cca 6851 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6852}
6853
f3187195
TH
6854/**
6855 * ata_host_alloc - allocate and init basic ATA host resources
6856 * @dev: generic device this host is associated with
6857 * @max_ports: maximum number of ATA ports associated with this host
6858 *
6859 * Allocate and initialize basic ATA host resources. LLD calls
6860 * this function to allocate a host, initializes it fully and
6861 * attaches it using ata_host_register().
6862 *
6863 * @max_ports ports are allocated and host->n_ports is
6864 * initialized to @max_ports. The caller is allowed to decrease
6865 * host->n_ports before calling ata_host_register(). The unused
6866 * ports will be automatically freed on registration.
6867 *
6868 * RETURNS:
6869 * Allocate ATA host on success, NULL on failure.
6870 *
6871 * LOCKING:
6872 * Inherited from calling layer (may sleep).
6873 */
6874struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6875{
6876 struct ata_host *host;
6877 size_t sz;
6878 int i;
6879
6880 DPRINTK("ENTER\n");
6881
6882 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6883 return NULL;
6884
6885 /* alloc a container for our list of ATA ports (buses) */
6886 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6887 /* alloc a container for our list of ATA ports (buses) */
6888 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6889 if (!host)
6890 goto err_out;
6891
6892 devres_add(dev, host);
6893 dev_set_drvdata(dev, host);
6894
6895 spin_lock_init(&host->lock);
6896 host->dev = dev;
6897 host->n_ports = max_ports;
6898
6899 /* allocate ports bound to this host */
6900 for (i = 0; i < max_ports; i++) {
6901 struct ata_port *ap;
6902
6903 ap = ata_port_alloc(host);
6904 if (!ap)
6905 goto err_out;
6906
6907 ap->port_no = i;
6908 host->ports[i] = ap;
6909 }
6910
6911 devres_remove_group(dev, NULL);
6912 return host;
6913
6914 err_out:
6915 devres_release_group(dev, NULL);
6916 return NULL;
6917}
6918
f5cda257
TH
6919/**
6920 * ata_host_alloc_pinfo - alloc host and init with port_info array
6921 * @dev: generic device this host is associated with
6922 * @ppi: array of ATA port_info to initialize host with
6923 * @n_ports: number of ATA ports attached to this host
6924 *
6925 * Allocate ATA host and initialize with info from @ppi. If NULL
6926 * terminated, @ppi may contain fewer entries than @n_ports. The
6927 * last entry will be used for the remaining ports.
6928 *
6929 * RETURNS:
 6930 * Allocated ATA host on success, NULL on failure.
6931 *
6932 * LOCKING:
6933 * Inherited from calling layer (may sleep).
6934 */
6935struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6936 const struct ata_port_info * const * ppi,
6937 int n_ports)
6938{
6939 const struct ata_port_info *pi;
6940 struct ata_host *host;
6941 int i, j;
6942
6943 host = ata_host_alloc(dev, n_ports);
6944 if (!host)
6945 return NULL;
6946
6947 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6948 struct ata_port *ap = host->ports[i];
6949
6950 if (ppi[j])
6951 pi = ppi[j++];
6952
6953 ap->pio_mask = pi->pio_mask;
6954 ap->mwdma_mask = pi->mwdma_mask;
6955 ap->udma_mask = pi->udma_mask;
6956 ap->flags |= pi->flags;
0c88758b 6957 ap->link.flags |= pi->link_flags;
f5cda257
TH
6958 ap->ops = pi->port_ops;
6959
6960 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6961 host->ops = pi->port_ops;
6962 if (!host->private_data && pi->private_data)
6963 host->private_data = pi->private_data;
6964 }
6965
6966 return host;
6967}
6968
ecef7253
TH
6969/**
6970 * ata_host_start - start and freeze ports of an ATA host
6971 * @host: ATA host to start ports for
6972 *
6973 * Start and then freeze ports of @host. Started status is
6974 * recorded in host->flags, so this function can be called
6975 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6976 * once. If host->ops isn't initialized yet, its set to the
6977 * first non-dummy port ops.
ecef7253
TH
6978 *
6979 * LOCKING:
6980 * Inherited from calling layer (may sleep).
6981 *
6982 * RETURNS:
6983 * 0 if all ports are started successfully, -errno otherwise.
6984 */
6985int ata_host_start(struct ata_host *host)
6986{
6987 int i, rc;
6988
6989 if (host->flags & ATA_HOST_STARTED)
6990 return 0;
6991
6992 for (i = 0; i < host->n_ports; i++) {
6993 struct ata_port *ap = host->ports[i];
6994
f3187195
TH
6995 if (!host->ops && !ata_port_is_dummy(ap))
6996 host->ops = ap->ops;
6997
ecef7253
TH
6998 if (ap->ops->port_start) {
6999 rc = ap->ops->port_start(ap);
7000 if (rc) {
7001 ata_port_printk(ap, KERN_ERR, "failed to "
7002 "start port (errno=%d)\n", rc);
7003 goto err_out;
7004 }
7005 }
7006
7007 ata_eh_freeze_port(ap);
7008 }
7009
7010 host->flags |= ATA_HOST_STARTED;
7011 return 0;
7012
7013 err_out:
7014 while (--i >= 0) {
7015 struct ata_port *ap = host->ports[i];
7016
7017 if (ap->ops->port_stop)
7018 ap->ops->port_stop(ap);
7019 }
7020 return rc;
7021}
7022
b03732f0 7023/**
cca3974e
JG
7024 * ata_sas_host_init - Initialize a host struct
7025 * @host: host to initialize
7026 * @dev: device host is attached to
7027 * @flags: host flags
7028 * @ops: port_ops
b03732f0
BK
7029 *
7030 * LOCKING:
7031 * PCI/etc. bus probe sem.
7032 *
7033 */
f3187195 7034/* KILLME - the only user left is ipr */
cca3974e
JG
7035void ata_host_init(struct ata_host *host, struct device *dev,
7036 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 7037{
cca3974e
JG
7038 spin_lock_init(&host->lock);
7039 host->dev = dev;
7040 host->flags = flags;
7041 host->ops = ops;
b03732f0
BK
7042}
7043
f3187195
TH
7044/**
7045 * ata_host_register - register initialized ATA host
7046 * @host: ATA host to register
7047 * @sht: template for SCSI host
7048 *
7049 * Register initialized ATA host. @host is allocated using
7050 * ata_host_alloc() and fully initialized by LLD. This function
7051 * starts ports, registers @host with ATA and SCSI layers and
7052 * probe registered devices.
7053 *
7054 * LOCKING:
7055 * Inherited from calling layer (may sleep).
7056 *
7057 * RETURNS:
7058 * 0 on success, -errno otherwise.
7059 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.  host->ports[] is NULL-terminated (see
	 * ata_host_alloc()), so this loop stops at the terminator.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap)) {
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
			ata_ehi_clear_desc(&ap->link.eh_info);
		} else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		/* NOTE: this inner rc intentionally shadows the outer
		 * one; bus-probe failures are not propagated (see the
		 * FIXME below). */
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			/* new-style (EH-aware) port: hand boot probing
			 * to the error handler */
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			/* probe every possible device on the link */
			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style port: synchronous bus probe */
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
		ata_lpm_schedule(ap, ap->pm_policy);
	}

	return 0;
}
7173
f5cda257
TH
7174/**
7175 * ata_host_activate - start host, request IRQ and register it
7176 * @host: target ATA host
7177 * @irq: IRQ to request
7178 * @irq_handler: irq_handler used when requesting IRQ
7179 * @irq_flags: irq_flags used when requesting IRQ
7180 * @sht: scsi_host_template to use when registering the host
7181 *
7182 * After allocating an ATA host and initializing it, most libata
7183 * LLDs perform three steps to activate the host - start host,
 7184 * request IRQ and register it. This helper takes necessary
7185 * arguments and performs the three steps in one go.
7186 *
3d46b2e2
PM
7187 * An invalid IRQ skips the IRQ registration and expects the host to
7188 * have set polling mode on the port. In this case, @irq_handler
7189 * should be NULL.
7190 *
f5cda257
TH
7191 * LOCKING:
7192 * Inherited from calling layer (may sleep).
7193 *
7194 * RETURNS:
7195 * 0 on success, -errno otherwise.
7196 */
7197int ata_host_activate(struct ata_host *host, int irq,
7198 irq_handler_t irq_handler, unsigned long irq_flags,
7199 struct scsi_host_template *sht)
7200{
cbcdd875 7201 int i, rc;
f5cda257
TH
7202
7203 rc = ata_host_start(host);
7204 if (rc)
7205 return rc;
7206
3d46b2e2
PM
7207 /* Special case for polling mode */
7208 if (!irq) {
7209 WARN_ON(irq_handler);
7210 return ata_host_register(host, sht);
7211 }
7212
f5cda257
TH
7213 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7214 dev_driver_string(host->dev), host);
7215 if (rc)
7216 return rc;
7217
cbcdd875
TH
7218 for (i = 0; i < host->n_ports; i++)
7219 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7220
f5cda257
TH
7221 rc = ata_host_register(host, sht);
7222 /* if failed, just free the IRQ and leave ports alone */
7223 if (rc)
7224 devm_free_irq(host->dev, irq, host);
7225
7226 return rc;
7227}
7228
720ba126
TH
7229/**
 7230 * ata_port_detach - Detach ATA port in preparation of device removal
7231 * @ap: ATA port to be detached
7232 *
7233 * Detach all ATA devices and the associated SCSI devices of @ap;
7234 * then, remove the associated SCSI host. @ap is guaranteed to
7235 * be quiescent on return from this function.
7236 *
7237 * LOCKING:
7238 * Kernel thread context (may sleep).
7239 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* old-style ports without EH need no quiescing dance */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* stop any pending hotplug work before the port goes away */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
7283
0529c159
TH
7284/**
7285 * ata_host_detach - Detach all ports of an ATA host
7286 * @host: Host to detach
7287 *
7288 * Detach all ports of @host.
7289 *
7290 * LOCKING:
7291 * Kernel thread context (may sleep).
7292 */
7293void ata_host_detach(struct ata_host *host)
7294{
7295 int i;
7296
7297 for (i = 0; i < host->n_ports; i++)
7298 ata_port_detach(host->ports[i]);
7299}
7300
1da177e4
LT
7301/**
7302 * ata_std_ports - initialize ioaddr with standard port offsets.
7303 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7304 *
7305 * Utility function which initializes data_addr, error_addr,
7306 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7307 * device_addr, status_addr, and command_addr to standard offsets
7308 * relative to cmd_addr.
7309 *
7310 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7311 */
0baab86b 7312
1da177e4
LT
7313void ata_std_ports(struct ata_ioports *ioaddr)
7314{
7315 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7316 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7317 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7318 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7319 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7320 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7321 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7322 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7323 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7324 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7325}
7326
0baab86b 7327
374b1873
JG
7328#ifdef CONFIG_PCI
7329
1da177e4
LT
7330/**
7331 * ata_pci_remove_one - PCI layer callback for device removal
7332 * @pdev: PCI device that was removed
7333 *
b878ca5d
TH
7334 * PCI layer indicates to libata via this hook that hot-unplug or
7335 * module unload event has occurred. Detach all ports. Resource
7336 * release is handled via devres.
1da177e4
LT
7337 *
7338 * LOCKING:
7339 * Inherited from PCI layer (may sleep).
7340 */
f0d36efd 7341void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7342{
2855568b 7343 struct device *dev = &pdev->dev;
cca3974e 7344 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7345
b878ca5d 7346 ata_host_detach(host);
1da177e4
LT
7347}
7348
7349/* move to PCI subsystem */
057ace5e 7350int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7351{
7352 unsigned long tmp = 0;
7353
7354 switch (bits->width) {
7355 case 1: {
7356 u8 tmp8 = 0;
7357 pci_read_config_byte(pdev, bits->reg, &tmp8);
7358 tmp = tmp8;
7359 break;
7360 }
7361 case 2: {
7362 u16 tmp16 = 0;
7363 pci_read_config_word(pdev, bits->reg, &tmp16);
7364 tmp = tmp16;
7365 break;
7366 }
7367 case 4: {
7368 u32 tmp32 = 0;
7369 pci_read_config_dword(pdev, bits->reg, &tmp32);
7370 tmp = tmp32;
7371 break;
7372 }
7373
7374 default:
7375 return -EINVAL;
7376 }
7377
7378 tmp &= bits->mask;
7379
7380 return (tmp == bits->val) ? 1 : 0;
7381}
9b847548 7382
6ffa01d8 7383#ifdef CONFIG_PM
/*
 * Save PCI config space and disable the device.  The device is moved
 * to D3hot only for a real suspend (PM_EVENT_SUSPEND); for other PM
 * events it is left in its current power state.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
7392
/*
 * Bring the PCI device back to D0, restore its config space and
 * re-enable it.  Returns 0 on success, -errno if the device could
 * not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	/* managed (devres) enable - paired with driver detach */
	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
7410
3c5100c1 7411int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7412{
cca3974e 7413 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7414 int rc = 0;
7415
cca3974e 7416 rc = ata_host_suspend(host, mesg);
500530f6
TH
7417 if (rc)
7418 return rc;
7419
3c5100c1 7420 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7421
7422 return 0;
7423}
7424
7425int ata_pci_device_resume(struct pci_dev *pdev)
7426{
cca3974e 7427 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7428 int rc;
500530f6 7429
553c4aa6
TH
7430 rc = ata_pci_device_do_resume(pdev);
7431 if (rc == 0)
7432 ata_host_resume(host);
7433 return rc;
9b847548 7434}
6ffa01d8
TH
7435#endif /* CONFIG_PM */
7436
1da177e4
LT
7437#endif /* CONFIG_PCI */
7438
7439
1da177e4
LT
7440static int __init ata_init(void)
7441{
a8601e5f 7442 ata_probe_timeout *= HZ;
1da177e4
LT
7443 ata_wq = create_workqueue("ata");
7444 if (!ata_wq)
7445 return -ENOMEM;
7446
453b07ac
TH
7447 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7448 if (!ata_aux_wq) {
7449 destroy_workqueue(ata_wq);
7450 return -ENOMEM;
7451 }
7452
1da177e4
LT
7453 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7454 return 0;
7455}
7456
static void __exit ata_exit(void)
{
	/* tear down the workqueues created in ata_init() */
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
7462
/* subsys_initcall: run before regular module_init() driver initcalls */
subsys_initcall(ata_init);
module_exit(ata_exit);
7465
67846b30 7466static unsigned long ratelimit_time;
34af946a 7467static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7468
7469int ata_ratelimit(void)
7470{
7471 int rc;
7472 unsigned long flags;
7473
7474 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7475
7476 if (time_after(jiffies, ratelimit_time)) {
7477 rc = 1;
7478 ratelimit_time = jiffies + (HZ/5);
7479 } else
7480 rc = 0;
7481
7482 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7483
7484 return rc;
7485}
7486
c22daff4
TH
7487/**
7488 * ata_wait_register - wait until register value changes
7489 * @reg: IO-mapped register
7490 * @mask: Mask to apply to read register value
7491 * @val: Wait condition
7492 * @interval_msec: polling interval in milliseconds
7493 * @timeout_msec: timeout in milliseconds
7494 *
7495 * Waiting for some bits of register to change is a common
7496 * operation for ATA controllers. This function reads 32bit LE
7497 * IO-mapped register @reg and tests for the following condition.
7498 *
7499 * (*@reg & mask) != val
7500 *
7501 * If the condition is met, it returns; otherwise, the process is
7502 * repeated after @interval_msec until timeout.
7503 *
7504 * LOCKING:
7505 * Kernel thread context (may sleep)
7506 *
7507 * RETURNS:
7508 * The final register value.
7509 */
7510u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7511 unsigned long interval_msec,
7512 unsigned long timeout_msec)
7513{
7514 unsigned long timeout;
7515 u32 tmp;
7516
7517 tmp = ioread32(reg);
7518
7519 /* Calculate timeout _after_ the first read to make sure
7520 * preceding writes reach the controller before starting to
7521 * eat away the timeout.
7522 */
7523 timeout = jiffies + (timeout_msec * HZ) / 1000;
7524
7525 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7526 msleep(interval_msec);
7527 tmp = ioread32(reg);
7528 }
7529
7530 return tmp;
7531}
7532
dd5b06c4
TH
/*
 * Dummy port_ops
 *
 * No-op hooks used to flesh out ata_dummy_port_ops below, so LLDs
 * can populate array slots for ports that don't physically exist.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report "device ready" so a dummy port never blocks polling */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* refuse every command issued to a dummy port */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7549
/*
 * Port operations for a dummy (unused) port: every hook is a no-op
 * and qc_issue fails all commands with AC_ERR_SYSTEM.
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* port_info wrapper so LLDs can use dummy ports in ppi arrays */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7568
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
/* SATA debounce timing tables and dummy port objects */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
/* host allocation / registration lifecycle */
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
/* command setup / issue / completion */
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
/* taskfile access and link status helpers */
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
/* BMDMA helpers */
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
/* probing, reset and link management */
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
/* SCSI layer glue */
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
/* SCR register access and link state */
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

/* transfer timing computation */
EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

/* port multiplier (PMP) support */
EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);

/* error handling (EH) entry points */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

/* cable type detection helpers */
EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);