/* drivers/ata/libata-core.c */
/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <linux/io.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
218f3d30
JG
71static unsigned int ata_dev_set_feature(struct ata_device *dev,
72 u8 enable, u8 feature);
3373efd8 73static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 74static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 75
f3187195 76unsigned int ata_print_id = 1;
1da177e4
LT
77static struct workqueue_struct *ata_wq;
78
453b07ac
TH
79struct workqueue_struct *ata_aux_wq;
80
418dc1f5 81int atapi_enabled = 1;
1623c81e
JG
82module_param(atapi_enabled, int, 0444);
83MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
84
95de719a
AL
85int atapi_dmadir = 0;
86module_param(atapi_dmadir, int, 0444);
87MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
88
baf4fdfa
ML
89int atapi_passthru16 = 1;
90module_param(atapi_passthru16, int, 0444);
91MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
92
c3c013a2
JG
93int libata_fua = 0;
94module_param_named(fua, libata_fua, int, 0444);
95MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
96
2dcb407e 97static int ata_ignore_hpa;
1e999736
AC
98module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
99MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
100
b3a70601
AC
101static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
102module_param_named(dma, libata_dma_mask, int, 0444);
103MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");
104
a8601e5f
AM
105static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
106module_param(ata_probe_timeout, int, 0444);
107MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
108
6ebe9d86 109int libata_noacpi = 0;
d7d0dad6 110module_param_named(noacpi, libata_noacpi, int, 0444);
6ebe9d86 111MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");
11ef697b 112
1da177e4
LT
113MODULE_AUTHOR("Jeff Garzik");
114MODULE_DESCRIPTION("Library module for ATA devices");
115MODULE_LICENSE("GPL");
116MODULE_VERSION(DRV_VERSION);
117
0baab86b 118
1da177e4
LT
119/**
120 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
121 * @tf: Taskfile to convert
1da177e4 122 * @pmp: Port multiplier port
9977126c
TH
123 * @is_cmd: This FIS is for command
124 * @fis: Buffer into which data will output
1da177e4
LT
125 *
126 * Converts a standard ATA taskfile to a Serial ATA
127 * FIS structure (Register - Host to Device).
128 *
129 * LOCKING:
130 * Inherited from caller.
131 */
9977126c 132void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 133{
9977126c
TH
134 fis[0] = 0x27; /* Register - Host to Device FIS */
135 fis[1] = pmp & 0xf; /* Port multiplier number*/
136 if (is_cmd)
137 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
138
1da177e4
LT
139 fis[2] = tf->command;
140 fis[3] = tf->feature;
141
142 fis[4] = tf->lbal;
143 fis[5] = tf->lbam;
144 fis[6] = tf->lbah;
145 fis[7] = tf->device;
146
147 fis[8] = tf->hob_lbal;
148 fis[9] = tf->hob_lbam;
149 fis[10] = tf->hob_lbah;
150 fis[11] = tf->hob_feature;
151
152 fis[12] = tf->nsect;
153 fis[13] = tf->hob_nsect;
154 fis[14] = 0;
155 fis[15] = tf->ctl;
156
157 fis[16] = 0;
158 fis[17] = 0;
159 fis[18] = 0;
160 fis[19] = 0;
161}
162
163/**
164 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
165 * @fis: Buffer from which data will be input
166 * @tf: Taskfile to output
167 *
e12a1be6 168 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
169 *
170 * LOCKING:
171 * Inherited from caller.
172 */
173
057ace5e 174void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
175{
176 tf->command = fis[2]; /* status */
177 tf->feature = fis[3]; /* error */
178
179 tf->lbal = fis[4];
180 tf->lbam = fis[5];
181 tf->lbah = fis[6];
182 tf->device = fis[7];
183
184 tf->hob_lbal = fis[8];
185 tf->hob_lbam = fis[9];
186 tf->hob_lbah = fis[10];
187
188 tf->nsect = fis[12];
189 tf->hob_nsect = fis[13];
190}
191
8cbd6df1
AL
192static const u8 ata_rw_cmds[] = {
193 /* pio multi */
194 ATA_CMD_READ_MULTI,
195 ATA_CMD_WRITE_MULTI,
196 ATA_CMD_READ_MULTI_EXT,
197 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
198 0,
199 0,
200 0,
201 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
202 /* pio */
203 ATA_CMD_PIO_READ,
204 ATA_CMD_PIO_WRITE,
205 ATA_CMD_PIO_READ_EXT,
206 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
207 0,
208 0,
209 0,
210 0,
8cbd6df1
AL
211 /* dma */
212 ATA_CMD_READ,
213 ATA_CMD_WRITE,
214 ATA_CMD_READ_EXT,
9a3dccc4
TH
215 ATA_CMD_WRITE_EXT,
216 0,
217 0,
218 0,
219 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 220};
1da177e4
LT
221
222/**
8cbd6df1 223 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
224 * @tf: command to examine and configure
225 * @dev: device tf belongs to
1da177e4 226 *
2e9edbf8 227 * Examine the device configuration and tf->flags to calculate
8cbd6df1 228 * the proper read/write commands and protocol to use.
1da177e4
LT
229 *
230 * LOCKING:
231 * caller.
232 */
bd056d7e 233static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 234{
9a3dccc4 235 u8 cmd;
1da177e4 236
9a3dccc4 237 int index, fua, lba48, write;
2e9edbf8 238
9a3dccc4 239 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
240 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
241 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 242
8cbd6df1
AL
243 if (dev->flags & ATA_DFLAG_PIO) {
244 tf->protocol = ATA_PROT_PIO;
9a3dccc4 245 index = dev->multi_count ? 0 : 8;
9af5c9c9 246 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
247 /* Unable to use DMA due to host limitation */
248 tf->protocol = ATA_PROT_PIO;
0565c26d 249 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
250 } else {
251 tf->protocol = ATA_PROT_DMA;
9a3dccc4 252 index = 16;
8cbd6df1 253 }
1da177e4 254
9a3dccc4
TH
255 cmd = ata_rw_cmds[index + fua + lba48 + write];
256 if (cmd) {
257 tf->command = cmd;
258 return 0;
259 }
260 return -1;
1da177e4
LT
261}
262
35b649fe
TH
263/**
264 * ata_tf_read_block - Read block address from ATA taskfile
265 * @tf: ATA taskfile of interest
266 * @dev: ATA device @tf belongs to
267 *
268 * LOCKING:
269 * None.
270 *
271 * Read block address from @tf. This function can handle all
272 * three address formats - LBA, LBA48 and CHS. tf->protocol and
273 * flags select the address format to use.
274 *
275 * RETURNS:
276 * Block address read from @tf.
277 */
278u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
279{
280 u64 block = 0;
281
282 if (tf->flags & ATA_TFLAG_LBA) {
283 if (tf->flags & ATA_TFLAG_LBA48) {
284 block |= (u64)tf->hob_lbah << 40;
285 block |= (u64)tf->hob_lbam << 32;
286 block |= tf->hob_lbal << 24;
287 } else
288 block |= (tf->device & 0xf) << 24;
289
290 block |= tf->lbah << 16;
291 block |= tf->lbam << 8;
292 block |= tf->lbal;
293 } else {
294 u32 cyl, head, sect;
295
296 cyl = tf->lbam | (tf->lbah << 8);
297 head = tf->device & 0xf;
298 sect = tf->lbal;
299
300 block = (cyl * dev->heads + head) * dev->sectors + sect;
301 }
302
303 return block;
304}
305
bd056d7e
TH
306/**
307 * ata_build_rw_tf - Build ATA taskfile for given read/write request
308 * @tf: Target ATA taskfile
309 * @dev: ATA device @tf belongs to
310 * @block: Block address
311 * @n_block: Number of blocks
312 * @tf_flags: RW/FUA etc...
313 * @tag: tag
314 *
315 * LOCKING:
316 * None.
317 *
318 * Build ATA taskfile @tf for read/write request described by
319 * @block, @n_block, @tf_flags and @tag on @dev.
320 *
321 * RETURNS:
322 *
323 * 0 on success, -ERANGE if the request is too large for @dev,
324 * -EINVAL if the request is invalid.
325 */
326int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
327 u64 block, u32 n_block, unsigned int tf_flags,
328 unsigned int tag)
329{
330 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
331 tf->flags |= tf_flags;
332
6d1245bf 333 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
334 /* yay, NCQ */
335 if (!lba_48_ok(block, n_block))
336 return -ERANGE;
337
338 tf->protocol = ATA_PROT_NCQ;
339 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
340
341 if (tf->flags & ATA_TFLAG_WRITE)
342 tf->command = ATA_CMD_FPDMA_WRITE;
343 else
344 tf->command = ATA_CMD_FPDMA_READ;
345
346 tf->nsect = tag << 3;
347 tf->hob_feature = (n_block >> 8) & 0xff;
348 tf->feature = n_block & 0xff;
349
350 tf->hob_lbah = (block >> 40) & 0xff;
351 tf->hob_lbam = (block >> 32) & 0xff;
352 tf->hob_lbal = (block >> 24) & 0xff;
353 tf->lbah = (block >> 16) & 0xff;
354 tf->lbam = (block >> 8) & 0xff;
355 tf->lbal = block & 0xff;
356
357 tf->device = 1 << 6;
358 if (tf->flags & ATA_TFLAG_FUA)
359 tf->device |= 1 << 7;
360 } else if (dev->flags & ATA_DFLAG_LBA) {
361 tf->flags |= ATA_TFLAG_LBA;
362
363 if (lba_28_ok(block, n_block)) {
364 /* use LBA28 */
365 tf->device |= (block >> 24) & 0xf;
366 } else if (lba_48_ok(block, n_block)) {
367 if (!(dev->flags & ATA_DFLAG_LBA48))
368 return -ERANGE;
369
370 /* use LBA48 */
371 tf->flags |= ATA_TFLAG_LBA48;
372
373 tf->hob_nsect = (n_block >> 8) & 0xff;
374
375 tf->hob_lbah = (block >> 40) & 0xff;
376 tf->hob_lbam = (block >> 32) & 0xff;
377 tf->hob_lbal = (block >> 24) & 0xff;
378 } else
379 /* request too large even for LBA48 */
380 return -ERANGE;
381
382 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
383 return -EINVAL;
384
385 tf->nsect = n_block & 0xff;
386
387 tf->lbah = (block >> 16) & 0xff;
388 tf->lbam = (block >> 8) & 0xff;
389 tf->lbal = block & 0xff;
390
391 tf->device |= ATA_LBA;
392 } else {
393 /* CHS */
394 u32 sect, head, cyl, track;
395
396 /* The request -may- be too large for CHS addressing. */
397 if (!lba_28_ok(block, n_block))
398 return -ERANGE;
399
400 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
401 return -EINVAL;
402
403 /* Convert LBA to CHS */
404 track = (u32)block / dev->sectors;
405 cyl = track / dev->heads;
406 head = track % dev->heads;
407 sect = (u32)block % dev->sectors + 1;
408
409 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
410 (u32)block, track, cyl, head, sect);
411
412 /* Check whether the converted CHS can fit.
413 Cylinder: 0-65535
414 Head: 0-15
415 Sector: 1-255*/
416 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
417 return -ERANGE;
418
419 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
420 tf->lbal = sect;
421 tf->lbam = cyl;
422 tf->lbah = cyl >> 8;
423 tf->device |= head;
424 }
425
426 return 0;
427}
428
cb95d562
TH
429/**
430 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
431 * @pio_mask: pio_mask
432 * @mwdma_mask: mwdma_mask
433 * @udma_mask: udma_mask
434 *
435 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
436 * unsigned int xfer_mask.
437 *
438 * LOCKING:
439 * None.
440 *
441 * RETURNS:
442 * Packed xfer_mask.
443 */
444static unsigned int ata_pack_xfermask(unsigned int pio_mask,
445 unsigned int mwdma_mask,
446 unsigned int udma_mask)
447{
448 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
449 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
450 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
451}
452
c0489e4e
TH
453/**
454 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
455 * @xfer_mask: xfer_mask to unpack
456 * @pio_mask: resulting pio_mask
457 * @mwdma_mask: resulting mwdma_mask
458 * @udma_mask: resulting udma_mask
459 *
460 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
461 * Any NULL distination masks will be ignored.
462 */
463static void ata_unpack_xfermask(unsigned int xfer_mask,
464 unsigned int *pio_mask,
465 unsigned int *mwdma_mask,
466 unsigned int *udma_mask)
467{
468 if (pio_mask)
469 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
470 if (mwdma_mask)
471 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
472 if (udma_mask)
473 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
474}
475
cb95d562 476static const struct ata_xfer_ent {
be9a50c8 477 int shift, bits;
cb95d562
TH
478 u8 base;
479} ata_xfer_tbl[] = {
480 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
481 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
482 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
483 { -1, },
484};
485
486/**
487 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
488 * @xfer_mask: xfer_mask of interest
489 *
490 * Return matching XFER_* value for @xfer_mask. Only the highest
491 * bit of @xfer_mask is considered.
492 *
493 * LOCKING:
494 * None.
495 *
496 * RETURNS:
497 * Matching XFER_* value, 0 if no match found.
498 */
499static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
500{
501 int highbit = fls(xfer_mask) - 1;
502 const struct ata_xfer_ent *ent;
503
504 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
505 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
506 return ent->base + highbit - ent->shift;
507 return 0;
508}
509
510/**
511 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
512 * @xfer_mode: XFER_* of interest
513 *
514 * Return matching xfer_mask for @xfer_mode.
515 *
516 * LOCKING:
517 * None.
518 *
519 * RETURNS:
520 * Matching xfer_mask, 0 if no match found.
521 */
522static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
523{
524 const struct ata_xfer_ent *ent;
525
526 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
527 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
528 return 1 << (ent->shift + xfer_mode - ent->base);
529 return 0;
530}
531
532/**
533 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
534 * @xfer_mode: XFER_* of interest
535 *
536 * Return matching xfer_shift for @xfer_mode.
537 *
538 * LOCKING:
539 * None.
540 *
541 * RETURNS:
542 * Matching xfer_shift, -1 if no match found.
543 */
544static int ata_xfer_mode2shift(unsigned int xfer_mode)
545{
546 const struct ata_xfer_ent *ent;
547
548 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
549 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
550 return ent->shift;
551 return -1;
552}

/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position in xfer_mask: PIO, MWDMA, then UDMA */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[highbit];
	return "<n/a>";
}

/* map a SATA link speed number (1-based) to a human readable string */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	const unsigned int nr_spds = sizeof(spd_str) / sizeof(spd_str[0]);

	if (spd == 0 || (spd - 1) >= nr_spds)
		return "<unknown>";
	return spd_str[spd - 1];
}
611
3373efd8 612void ata_dev_disable(struct ata_device *dev)
0b8efb0a 613{
09d7f9b0 614 if (ata_dev_enabled(dev)) {
9af5c9c9 615 if (ata_msg_drv(dev->link->ap))
09d7f9b0 616 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
617 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
618 ATA_DNXFER_QUIET);
0b8efb0a
TH
619 dev->class++;
620 }
621}
622
ca77329f
KCA
623static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
624{
625 struct ata_link *link = dev->link;
626 struct ata_port *ap = link->ap;
627 u32 scontrol;
628 unsigned int err_mask;
629 int rc;
630
631 /*
632 * disallow DIPM for drivers which haven't set
633 * ATA_FLAG_IPM. This is because when DIPM is enabled,
634 * phy ready will be set in the interrupt status on
635 * state changes, which will cause some drivers to
636 * think there are errors - additionally drivers will
637 * need to disable hot plug.
638 */
639 if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
640 ap->pm_policy = NOT_AVAILABLE;
641 return -EINVAL;
642 }
643
644 /*
645 * For DIPM, we will only enable it for the
646 * min_power setting.
647 *
648 * Why? Because Disks are too stupid to know that
649 * If the host rejects a request to go to SLUMBER
650 * they should retry at PARTIAL, and instead it
651 * just would give up. So, for medium_power to
652 * work at all, we need to only allow HIPM.
653 */
654 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
655 if (rc)
656 return rc;
657
658 switch (policy) {
659 case MIN_POWER:
660 /* no restrictions on IPM transitions */
661 scontrol &= ~(0x3 << 8);
662 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
663 if (rc)
664 return rc;
665
666 /* enable DIPM */
667 if (dev->flags & ATA_DFLAG_DIPM)
668 err_mask = ata_dev_set_feature(dev,
669 SETFEATURES_SATA_ENABLE, SATA_DIPM);
670 break;
671 case MEDIUM_POWER:
672 /* allow IPM to PARTIAL */
673 scontrol &= ~(0x1 << 8);
674 scontrol |= (0x2 << 8);
675 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
676 if (rc)
677 return rc;
678
679 /* disable DIPM */
680 if (ata_dev_enabled(dev) && (dev->flags & ATA_DFLAG_DIPM))
681 err_mask = ata_dev_set_feature(dev,
682 SETFEATURES_SATA_DISABLE, SATA_DIPM);
683 break;
684 case NOT_AVAILABLE:
685 case MAX_PERFORMANCE:
686 /* disable all IPM transitions */
687 scontrol |= (0x3 << 8);
688 rc = sata_scr_write(link, SCR_CONTROL, scontrol);
689 if (rc)
690 return rc;
691
692 /* disable DIPM */
693 if (ata_dev_enabled(dev) && (dev->flags & ATA_DFLAG_DIPM))
694 err_mask = ata_dev_set_feature(dev,
695 SETFEATURES_SATA_DISABLE, SATA_DIPM);
696 break;
697 }
698
699 /* FIXME: handle SET FEATURES failure */
700 (void) err_mask;
701
702 return 0;
703}
704
705/**
706 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
707 * @dev: device to enable power management
708 * @policy: the link power management policy
ca77329f
KCA
709 *
710 * Enable SATA Interface power management. This will enable
711 * Device Interface Power Management (DIPM) for min_power
712 * policy, and then call driver specific callbacks for
713 * enabling Host Initiated Power management.
714 *
715 * Locking: Caller.
716 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
717 */
718void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
719{
720 int rc = 0;
721 struct ata_port *ap = dev->link->ap;
722
723 /* set HIPM first, then DIPM */
724 if (ap->ops->enable_pm)
725 rc = ap->ops->enable_pm(ap, policy);
726 if (rc)
727 goto enable_pm_out;
728 rc = ata_dev_set_dipm(dev, policy);
729
730enable_pm_out:
731 if (rc)
732 ap->pm_policy = MAX_PERFORMANCE;
733 else
734 ap->pm_policy = policy;
735 return /* rc */; /* hopefully we can use 'rc' eventually */
736}

#ifdef CONFIG_PM
/**
 *	ata_dev_disable_pm - disable SATA interface power management
 *	@dev: device to disable power management
 *
 *	Disable SATA Interface power management.  This will disable
 *	Device Interface Power Management (DIPM) without changing
 *	policy, call driver specific callbacks for disabling Host
 *	Initiated Power management.
 *
 *	Locking: Caller.
 *	Returns: void
 */
static void ata_dev_disable_pm(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;

	/* disable DIPM first, then the host side */
	ata_dev_set_dipm(dev, MAX_PERFORMANCE);
	if (ap->ops->disable_pm)
		ap->ops->disable_pm(ap);
}
#endif	/* CONFIG_PM */
ca77329f
KCA
760
761void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
762{
763 ap->pm_policy = policy;
764 ap->link.eh_info.action |= ATA_EHI_LPM;
765 ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
766 ata_port_schedule_eh(ap);
767}

#ifdef CONFIG_PM
/* NOTE(review): despite the name this disables link PM on every device
 * of @host (presumably around suspend); ata_lpm_disable() below then
 * re-schedules each port's saved policy — confirm against callers */
static void ata_lpm_enable(struct ata_host *host)
{
	struct ata_link *link;
	struct ata_port *ap;
	struct ata_device *dev;
	int i;

	for (i = 0; i < host->n_ports; i++) {
		ap = host->ports[i];
		ata_port_for_each_link(link, ap) {
			ata_link_for_each_dev(dev, link)
				ata_dev_disable_pm(dev);
		}
	}
}

static void ata_lpm_disable(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_lpm_schedule(ap, ap->pm_policy);
	}
}
#endif	/* CONFIG_PM */

ca77329f
KCA
796
797
1da177e4 798/**
0d5ff566 799 * ata_devchk - PATA device presence detection
1da177e4
LT
800 * @ap: ATA channel to examine
801 * @device: Device to examine (starting at zero)
802 *
803 * This technique was originally described in
804 * Hale Landis's ATADRVR (www.ata-atapi.com), and
805 * later found its way into the ATA/ATAPI spec.
806 *
807 * Write a pattern to the ATA shadow registers,
808 * and if a device is present, it will respond by
809 * correctly storing and echoing back the
810 * ATA shadow register contents.
811 *
812 * LOCKING:
813 * caller.
814 */
815
0d5ff566 816static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
817{
818 struct ata_ioports *ioaddr = &ap->ioaddr;
819 u8 nsect, lbal;
820
821 ap->ops->dev_select(ap, device);
822
0d5ff566
TH
823 iowrite8(0x55, ioaddr->nsect_addr);
824 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 825
0d5ff566
TH
826 iowrite8(0xaa, ioaddr->nsect_addr);
827 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 828
0d5ff566
TH
829 iowrite8(0x55, ioaddr->nsect_addr);
830 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 831
0d5ff566
TH
832 nsect = ioread8(ioaddr->nsect_addr);
833 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
834
835 if ((nsect == 0x55) && (lbal == 0xaa))
836 return 1; /* we found a device */
837
838 return 0; /* nothing found */
839}
840
1da177e4
LT
841/**
842 * ata_dev_classify - determine device type based on ATA-spec signature
843 * @tf: ATA taskfile register set for device to be identified
844 *
845 * Determine from taskfile register contents whether a device is
846 * ATA or ATAPI, as per "Signature and persistence" section
847 * of ATA/PI spec (volume 1, sect 5.14).
848 *
849 * LOCKING:
850 * None.
851 *
852 * RETURNS:
633273a3
TH
853 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
854 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 855 */
057ace5e 856unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
857{
858 /* Apple's open source Darwin code hints that some devices only
859 * put a proper signature into the LBA mid/high registers,
860 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
861 *
862 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
863 * signatures for ATA and ATAPI devices attached on SerialATA,
864 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
865 * spec has never mentioned about using different signatures
866 * for ATA/ATAPI devices. Then, Serial ATA II: Port
867 * Multiplier specification began to use 0x69/0x96 to identify
868 * port multpliers and 0x3c/0xc3 to identify SEMB device.
869 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
870 * 0x69/0x96 shortly and described them as reserved for
871 * SerialATA.
872 *
873 * We follow the current spec and consider that 0x69/0x96
874 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 875 */
633273a3 876 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
877 DPRINTK("found ATA device by sig\n");
878 return ATA_DEV_ATA;
879 }
880
633273a3 881 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
882 DPRINTK("found ATAPI device by sig\n");
883 return ATA_DEV_ATAPI;
884 }
885
633273a3
TH
886 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
887 DPRINTK("found PMP device by sig\n");
888 return ATA_DEV_PMP;
889 }
890
891 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 892 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
893 return ATA_DEV_SEMB_UNSUP; /* not yet */
894 }
895
1da177e4
LT
896 DPRINTK("unknown device\n");
897 return ATA_DEV_UNKNOWN;
898}
899
900/**
901 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
902 * @dev: ATA device to classify (starting at zero)
903 * @present: device seems present
b4dc7623 904 * @r_err: Value of error register on completion
1da177e4
LT
905 *
906 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
907 * an ATA/ATAPI-defined set of values is placed in the ATA
908 * shadow registers, indicating the results of device detection
909 * and diagnostics.
910 *
911 * Select the ATA device, and read the values from the ATA shadow
912 * registers. Then parse according to the Error register value,
913 * and the spec-defined values examined by ata_dev_classify().
914 *
915 * LOCKING:
916 * caller.
b4dc7623
TH
917 *
918 * RETURNS:
919 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 920 */
3f19859e
TH
921unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
922 u8 *r_err)
1da177e4 923{
3f19859e 924 struct ata_port *ap = dev->link->ap;
1da177e4
LT
925 struct ata_taskfile tf;
926 unsigned int class;
927 u8 err;
928
3f19859e 929 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
930
931 memset(&tf, 0, sizeof(tf));
932
1da177e4 933 ap->ops->tf_read(ap, &tf);
0169e284 934 err = tf.feature;
b4dc7623
TH
935 if (r_err)
936 *r_err = err;
1da177e4 937
93590859 938 /* see if device passed diags: if master then continue and warn later */
3f19859e 939 if (err == 0 && dev->devno == 0)
93590859 940 /* diagnostic fail : do nothing _YET_ */
3f19859e 941 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 942 else if (err == 1)
1da177e4 943 /* do nothing */ ;
3f19859e 944 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
945 /* do nothing */ ;
946 else
b4dc7623 947 return ATA_DEV_NONE;
1da177e4 948
b4dc7623 949 /* determine if device is ATA or ATAPI */
1da177e4 950 class = ata_dev_classify(&tf);
b4dc7623 951
d7fbee05
TH
952 if (class == ATA_DEV_UNKNOWN) {
953 /* If the device failed diagnostic, it's likely to
954 * have reported incorrect device signature too.
955 * Assume ATA device if the device seems present but
956 * device signature is invalid with diagnostic
957 * failure.
958 */
959 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
960 class = ATA_DEV_ATA;
961 else
962 class = ATA_DEV_NONE;
963 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
964 class = ATA_DEV_NONE;
965
b4dc7623 966 return class;
1da177e4
LT
967}
968
969/**
6a62a04d 970 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
971 * @id: IDENTIFY DEVICE results we will examine
972 * @s: string into which data is output
973 * @ofs: offset into identify device page
974 * @len: length of string to return. must be an even number.
975 *
976 * The strings in the IDENTIFY DEVICE page are broken up into
977 * 16-bit chunks. Run through the string, and output each
978 * 8-bit chunk linearly, regardless of platform.
979 *
980 * LOCKING:
981 * caller.
982 */
983
6a62a04d
TH
984void ata_id_string(const u16 *id, unsigned char *s,
985 unsigned int ofs, unsigned int len)
1da177e4
LT
986{
987 unsigned int c;
988
989 while (len > 0) {
990 c = id[ofs] >> 8;
991 *s = c;
992 s++;
993
994 c = id[ofs] & 0xff;
995 *s = c;
996 s++;
997
998 ofs++;
999 len -= 2;
1000 }
1001}
1002
0e949ff3 1003/**
6a62a04d 1004 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1005 * @id: IDENTIFY DEVICE results we will examine
1006 * @s: string into which data is output
1007 * @ofs: offset into identify device page
1008 * @len: length of string to return. must be an odd number.
1009 *
6a62a04d 1010 * This function is identical to ata_id_string except that it
0e949ff3
TH
1011 * trims trailing spaces and terminates the resulting string with
1012 * null. @len must be actual maximum length (even number) + 1.
1013 *
1014 * LOCKING:
1015 * caller.
1016 */
6a62a04d
TH
1017void ata_id_c_string(const u16 *id, unsigned char *s,
1018 unsigned int ofs, unsigned int len)
0e949ff3
TH
1019{
1020 unsigned char *p;
1021
1022 WARN_ON(!(len & 1));
1023
6a62a04d 1024 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1025
1026 p = s + strnlen(s, len - 1);
1027 while (p > s && p[-1] == ' ')
1028 p--;
1029 *p = '\0';
1030}
0baab86b 1031
db6f8759
TH
1032static u64 ata_id_n_sectors(const u16 *id)
1033{
1034 if (ata_id_has_lba(id)) {
1035 if (ata_id_has_lba48(id))
1036 return ata_id_u64(id, 100);
1037 else
1038 return ata_id_u32(id, 60);
1039 } else {
1040 if (ata_id_current_chs_valid(id))
1041 return ata_id_u32(id, 57);
1042 else
1043 return id[1] * id[3] * id[6];
1044 }
1045}
1046
1e999736
AC
1047static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1048{
1049 u64 sectors = 0;
1050
1051 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1052 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1053 sectors |= (tf->hob_lbal & 0xff) << 24;
1054 sectors |= (tf->lbah & 0xff) << 16;
1055 sectors |= (tf->lbam & 0xff) << 8;
1056 sectors |= (tf->lbal & 0xff);
1057
1058 return ++sectors;
1059}
1060
1061static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1062{
1063 u64 sectors = 0;
1064
1065 sectors |= (tf->device & 0x0f) << 24;
1066 sectors |= (tf->lbah & 0xff) << 16;
1067 sectors |= (tf->lbam & 0xff) << 8;
1068 sectors |= (tf->lbal & 0xff);
1069
1070 return ++sectors;
1071}
1072
1073/**
c728a914
TH
1074 * ata_read_native_max_address - Read native max address
1075 * @dev: target device
1076 * @max_sectors: out parameter for the result native max address
1e999736 1077 *
c728a914
TH
1078 * Perform an LBA48 or LBA28 native size query upon the device in
1079 * question.
1e999736 1080 *
c728a914
TH
1081 * RETURNS:
1082 * 0 on success, -EACCES if command is aborted by the drive.
1083 * -EIO on other errors.
1e999736 1084 */
c728a914 1085static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 1086{
c728a914 1087 unsigned int err_mask;
1e999736 1088 struct ata_taskfile tf;
c728a914 1089 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1090
1091 ata_tf_init(dev, &tf);
1092
c728a914 1093 /* always clear all address registers */
1e999736 1094 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 1095
c728a914
TH
1096 if (lba48) {
1097 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
1098 tf.flags |= ATA_TFLAG_LBA48;
1099 } else
1100 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 1101
1e999736 1102 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
1103 tf.device |= ATA_LBA;
1104
2b789108 1105 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1106 if (err_mask) {
1107 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
1108 "max address (err_mask=0x%x)\n", err_mask);
1109 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
1110 return -EACCES;
1111 return -EIO;
1112 }
1e999736 1113
c728a914
TH
1114 if (lba48)
1115 *max_sectors = ata_tf_to_lba48(&tf);
1116 else
1117 *max_sectors = ata_tf_to_lba(&tf);
2dcb407e 1118 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
93328e11 1119 (*max_sectors)--;
c728a914 1120 return 0;
1e999736
AC
1121}
1122
1123/**
c728a914
TH
1124 * ata_set_max_sectors - Set max sectors
1125 * @dev: target device
6b38d1d1 1126 * @new_sectors: new max sectors value to set for the device
1e999736 1127 *
c728a914
TH
1128 * Set max sectors of @dev to @new_sectors.
1129 *
1130 * RETURNS:
1131 * 0 on success, -EACCES if command is aborted or denied (due to
1132 * previous non-volatile SET_MAX) by the drive. -EIO on other
1133 * errors.
1e999736 1134 */
05027adc 1135static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 1136{
c728a914 1137 unsigned int err_mask;
1e999736 1138 struct ata_taskfile tf;
c728a914 1139 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
1140
1141 new_sectors--;
1142
1143 ata_tf_init(dev, &tf);
1144
1e999736 1145 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
1146
1147 if (lba48) {
1148 tf.command = ATA_CMD_SET_MAX_EXT;
1149 tf.flags |= ATA_TFLAG_LBA48;
1150
1151 tf.hob_lbal = (new_sectors >> 24) & 0xff;
1152 tf.hob_lbam = (new_sectors >> 32) & 0xff;
1153 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 1154 } else {
c728a914
TH
1155 tf.command = ATA_CMD_SET_MAX;
1156
1e582ba4
TH
1157 tf.device |= (new_sectors >> 24) & 0xf;
1158 }
1159
1e999736 1160 tf.protocol |= ATA_PROT_NODATA;
c728a914 1161 tf.device |= ATA_LBA;
1e999736
AC
1162
1163 tf.lbal = (new_sectors >> 0) & 0xff;
1164 tf.lbam = (new_sectors >> 8) & 0xff;
1165 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 1166
2b789108 1167 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
c728a914
TH
1168 if (err_mask) {
1169 ata_dev_printk(dev, KERN_WARNING, "failed to set "
1170 "max address (err_mask=0x%x)\n", err_mask);
1171 if (err_mask == AC_ERR_DEV &&
1172 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
1173 return -EACCES;
1174 return -EIO;
1175 }
1176
c728a914 1177 return 0;
1e999736
AC
1178}
1179
1180/**
1181 * ata_hpa_resize - Resize a device with an HPA set
1182 * @dev: Device to resize
1183 *
1184 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
1185 * it if required to the full size of the media. The caller must check
1186 * the drive has the HPA feature set enabled.
05027adc
TH
1187 *
1188 * RETURNS:
1189 * 0 on success, -errno on failure.
1e999736 1190 */
05027adc 1191static int ata_hpa_resize(struct ata_device *dev)
1e999736 1192{
05027adc
TH
1193 struct ata_eh_context *ehc = &dev->link->eh_context;
1194 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1195 u64 sectors = ata_id_n_sectors(dev->id);
1196 u64 native_sectors;
c728a914 1197 int rc;
a617c09f 1198
05027adc
TH
1199 /* do we need to do it? */
1200 if (dev->class != ATA_DEV_ATA ||
1201 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1202 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1203 return 0;
1e999736 1204
05027adc
TH
1205 /* read native max address */
1206 rc = ata_read_native_max_address(dev, &native_sectors);
1207 if (rc) {
1208 /* If HPA isn't going to be unlocked, skip HPA
1209 * resizing from the next try.
1210 */
1211 if (!ata_ignore_hpa) {
1212 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1213 "broken, will skip HPA handling\n");
1214 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1215
1216 /* we can continue if device aborted the command */
1217 if (rc == -EACCES)
1218 rc = 0;
1e999736 1219 }
37301a55 1220
05027adc
TH
1221 return rc;
1222 }
1223
1224 /* nothing to do? */
1225 if (native_sectors <= sectors || !ata_ignore_hpa) {
1226 if (!print_info || native_sectors == sectors)
1227 return 0;
1228
1229 if (native_sectors > sectors)
1230 ata_dev_printk(dev, KERN_INFO,
1231 "HPA detected: current %llu, native %llu\n",
1232 (unsigned long long)sectors,
1233 (unsigned long long)native_sectors);
1234 else if (native_sectors < sectors)
1235 ata_dev_printk(dev, KERN_WARNING,
1236 "native sectors (%llu) is smaller than "
1237 "sectors (%llu)\n",
1238 (unsigned long long)native_sectors,
1239 (unsigned long long)sectors);
1240 return 0;
1241 }
1242
1243 /* let's unlock HPA */
1244 rc = ata_set_max_sectors(dev, native_sectors);
1245 if (rc == -EACCES) {
1246 /* if device aborted the command, skip HPA resizing */
1247 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1248 "(%llu -> %llu), skipping HPA handling\n",
1249 (unsigned long long)sectors,
1250 (unsigned long long)native_sectors);
1251 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1252 return 0;
1253 } else if (rc)
1254 return rc;
1255
1256 /* re-read IDENTIFY data */
1257 rc = ata_dev_reread_id(dev, 0);
1258 if (rc) {
1259 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1260 "data after HPA resizing\n");
1261 return rc;
1262 }
1263
1264 if (print_info) {
1265 u64 new_sectors = ata_id_n_sectors(dev->id);
1266 ata_dev_printk(dev, KERN_INFO,
1267 "HPA unlocked: %llu -> %llu, native %llu\n",
1268 (unsigned long long)sectors,
1269 (unsigned long long)new_sectors,
1270 (unsigned long long)native_sectors);
1271 }
1272
1273 return 0;
1e999736
AC
1274}
1275
10305f0f
AC
1276/**
1277 * ata_id_to_dma_mode - Identify DMA mode from id block
1278 * @dev: device to identify
cc261267 1279 * @unknown: mode to assume if we cannot tell
10305f0f
AC
1280 *
1281 * Set up the timing values for the device based upon the identify
1282 * reported values for the DMA mode. This function is used by drivers
1283 * which rely upon firmware configured modes, but wish to report the
1284 * mode correctly when possible.
1285 *
1286 * In addition we emit similarly formatted messages to the default
1287 * ata_dev_set_mode handler, in order to provide consistency of
1288 * presentation.
1289 */
1290
1291void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1292{
1293 unsigned int mask;
1294 u8 mode;
1295
1296 /* Pack the DMA modes */
1297 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1298 if (dev->id[53] & 0x04)
1299 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1300
1301 /* Select the mode in use */
1302 mode = ata_xfer_mask2mode(mask);
1303
1304 if (mode != 0) {
1305 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1306 ata_mode_string(mask));
1307 } else {
1308 /* SWDMA perhaps ? */
1309 mode = unknown;
1310 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1311 }
1312
1313 /* Configure the device reporting */
1314 dev->xfer_mode = mode;
1315 dev->xfer_shift = ata_xfer_mode2shift(mode);
1316}
1317
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	Intentionally empty: for controllers that need no explicit
 *	device selection.  May be used as the dev_select() entry in
 *	ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}
1333
0baab86b 1334
1da177e4
LT
1335/**
1336 * ata_std_dev_select - Select device 0/1 on ATA bus
1337 * @ap: ATA channel to manipulate
1338 * @device: ATA device (numbered from zero) to select
1339 *
1340 * Use the method defined in the ATA specification to
1341 * make either device 0, or device 1, active on the
0baab86b
EF
1342 * ATA channel. Works with both PIO and MMIO.
1343 *
1344 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1345 *
1346 * LOCKING:
1347 * caller.
1348 */
1349
2dcb407e 1350void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1351{
1352 u8 tmp;
1353
1354 if (device == 0)
1355 tmp = ATA_DEVICE_OBS;
1356 else
1357 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1358
0d5ff566 1359 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1360 ata_pause(ap); /* needed; also flushes, for mmio */
1361}
1362
1363/**
1364 * ata_dev_select - Select device 0/1 on ATA bus
1365 * @ap: ATA channel to manipulate
1366 * @device: ATA device (numbered from zero) to select
1367 * @wait: non-zero to wait for Status register BSY bit to clear
1368 * @can_sleep: non-zero if context allows sleeping
1369 *
1370 * Use the method defined in the ATA specification to
1371 * make either device 0, or device 1, active on the
1372 * ATA channel.
1373 *
1374 * This is a high-level version of ata_std_dev_select(),
1375 * which additionally provides the services of inserting
1376 * the proper pauses and status polling, where needed.
1377 *
1378 * LOCKING:
1379 * caller.
1380 */
1381
1382void ata_dev_select(struct ata_port *ap, unsigned int device,
1383 unsigned int wait, unsigned int can_sleep)
1384{
88574551 1385 if (ata_msg_probe(ap))
44877b4e
TH
1386 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1387 "device %u, wait %u\n", device, wait);
1da177e4
LT
1388
1389 if (wait)
1390 ata_wait_idle(ap);
1391
1392 ap->ops->dev_select(ap, device);
1393
1394 if (wait) {
9af5c9c9 1395 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1396 msleep(150);
1397 ata_wait_idle(ap);
1398 }
1399}
1400
1401/**
1402 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1403 * @id: IDENTIFY DEVICE page to dump
1da177e4 1404 *
0bd3300a
TH
1405 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1406 * page.
1da177e4
LT
1407 *
1408 * LOCKING:
1409 * caller.
1410 */
1411
0bd3300a 1412static inline void ata_dump_id(const u16 *id)
1da177e4
LT
1413{
1414 DPRINTK("49==0x%04x "
1415 "53==0x%04x "
1416 "63==0x%04x "
1417 "64==0x%04x "
1418 "75==0x%04x \n",
0bd3300a
TH
1419 id[49],
1420 id[53],
1421 id[63],
1422 id[64],
1423 id[75]);
1da177e4
LT
1424 DPRINTK("80==0x%04x "
1425 "81==0x%04x "
1426 "82==0x%04x "
1427 "83==0x%04x "
1428 "84==0x%04x \n",
0bd3300a
TH
1429 id[80],
1430 id[81],
1431 id[82],
1432 id[83],
1433 id[84]);
1da177e4
LT
1434 DPRINTK("88==0x%04x "
1435 "93==0x%04x\n",
0bd3300a
TH
1436 id[88],
1437 id[93]);
1da177e4
LT
1438}
1439
cb95d562
TH
1440/**
1441 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1442 * @id: IDENTIFY data to compute xfer mask from
1443 *
1444 * Compute the xfermask for this device. This is not as trivial
1445 * as it seems if we must consider early devices correctly.
1446 *
1447 * FIXME: pre IDE drive timing (do we care ?).
1448 *
1449 * LOCKING:
1450 * None.
1451 *
1452 * RETURNS:
1453 * Computed xfermask
1454 */
1455static unsigned int ata_id_xfermask(const u16 *id)
1456{
1457 unsigned int pio_mask, mwdma_mask, udma_mask;
1458
1459 /* Usual case. Word 53 indicates word 64 is valid */
1460 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1461 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1462 pio_mask <<= 3;
1463 pio_mask |= 0x7;
1464 } else {
1465 /* If word 64 isn't valid then Word 51 high byte holds
1466 * the PIO timing number for the maximum. Turn it into
1467 * a mask.
1468 */
7a0f1c8a 1469 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb 1470 if (mode < 5) /* Valid PIO range */
2dcb407e 1471 pio_mask = (2 << mode) - 1;
46767aeb
AC
1472 else
1473 pio_mask = 1;
cb95d562
TH
1474
1475 /* But wait.. there's more. Design your standards by
1476 * committee and you too can get a free iordy field to
1477 * process. However its the speeds not the modes that
1478 * are supported... Note drivers using the timing API
1479 * will get this right anyway
1480 */
1481 }
1482
1483 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1484
b352e57d
AC
1485 if (ata_id_is_cfa(id)) {
1486 /*
1487 * Process compact flash extended modes
1488 */
1489 int pio = id[163] & 0x7;
1490 int dma = (id[163] >> 3) & 7;
1491
1492 if (pio)
1493 pio_mask |= (1 << 5);
1494 if (pio > 1)
1495 pio_mask |= (1 << 6);
1496 if (dma)
1497 mwdma_mask |= (1 << 3);
1498 if (dma > 1)
1499 mwdma_mask |= (1 << 4);
1500 }
1501
fb21f0d0
TH
1502 udma_mask = 0;
1503 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1504 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1505
1506 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1507}
1508
86e45b6b
TH
1509/**
1510 * ata_port_queue_task - Queue port_task
1511 * @ap: The ata_port to queue port_task for
e2a7f77a 1512 * @fn: workqueue function to be scheduled
65f27f38 1513 * @data: data for @fn to use
e2a7f77a 1514 * @delay: delay time for workqueue function
86e45b6b
TH
1515 *
1516 * Schedule @fn(@data) for execution after @delay jiffies using
1517 * port_task. There is one port_task per port and it's the
1518 * user(low level driver)'s responsibility to make sure that only
1519 * one task is active at any given time.
1520 *
1521 * libata core layer takes care of synchronization between
1522 * port_task and EH. ata_port_queue_task() may be ignored for EH
1523 * synchronization.
1524 *
1525 * LOCKING:
1526 * Inherited from caller.
1527 */
65f27f38 1528void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
86e45b6b
TH
1529 unsigned long delay)
1530{
65f27f38
DH
1531 PREPARE_DELAYED_WORK(&ap->port_task, fn);
1532 ap->port_task_data = data;
86e45b6b 1533
45a66c1c
ON
1534 /* may fail if ata_port_flush_task() in progress */
1535 queue_delayed_work(ata_wq, &ap->port_task, delay);
86e45b6b
TH
1536}
1537
1538/**
1539 * ata_port_flush_task - Flush port_task
1540 * @ap: The ata_port to flush port_task for
1541 *
1542 * After this function completes, port_task is guranteed not to
1543 * be running or scheduled.
1544 *
1545 * LOCKING:
1546 * Kernel thread context (may sleep)
1547 */
1548void ata_port_flush_task(struct ata_port *ap)
1549{
86e45b6b
TH
1550 DPRINTK("ENTER\n");
1551
45a66c1c 1552 cancel_rearming_delayed_work(&ap->port_task);
86e45b6b 1553
0dd4b21f
BP
1554 if (ata_msg_ctl(ap))
1555 ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
86e45b6b
TH
1556}
1557
7102d230 1558static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1559{
77853bf2 1560 struct completion *waiting = qc->private_data;
a2a7a662 1561
a2a7a662 1562 complete(waiting);
a2a7a662
TH
1563}
1564
1565/**
2432697b 1566 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1567 * @dev: Device to which the command is sent
1568 * @tf: Taskfile registers for the command and the result
d69cf37d 1569 * @cdb: CDB for packet command
a2a7a662 1570 * @dma_dir: Data tranfer direction of the command
5c1ad8b3 1571 * @sgl: sg list for the data buffer of the command
2432697b 1572 * @n_elem: Number of sg entries
2b789108 1573 * @timeout: Timeout in msecs (0 for default)
a2a7a662
TH
1574 *
1575 * Executes libata internal command with timeout. @tf contains
1576 * command on entry and result on return. Timeout and error
1577 * conditions are reported via return value. No recovery action
1578 * is taken after a command times out. It's caller's duty to
1579 * clean up after timeout.
1580 *
1581 * LOCKING:
1582 * None. Should be called with kernel context, might sleep.
551e8889
TH
1583 *
1584 * RETURNS:
1585 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1586 */
2432697b
TH
1587unsigned ata_exec_internal_sg(struct ata_device *dev,
1588 struct ata_taskfile *tf, const u8 *cdb,
87260216 1589 int dma_dir, struct scatterlist *sgl,
2b789108 1590 unsigned int n_elem, unsigned long timeout)
a2a7a662 1591{
9af5c9c9
TH
1592 struct ata_link *link = dev->link;
1593 struct ata_port *ap = link->ap;
a2a7a662
TH
1594 u8 command = tf->command;
1595 struct ata_queued_cmd *qc;
2ab7db1f 1596 unsigned int tag, preempted_tag;
dedaf2b0 1597 u32 preempted_sactive, preempted_qc_active;
da917d69 1598 int preempted_nr_active_links;
60be6b9a 1599 DECLARE_COMPLETION_ONSTACK(wait);
a2a7a662 1600 unsigned long flags;
77853bf2 1601 unsigned int err_mask;
d95a717f 1602 int rc;
a2a7a662 1603
ba6a1308 1604 spin_lock_irqsave(ap->lock, flags);
a2a7a662 1605
e3180499 1606 /* no internal command while frozen */
b51e9e5d 1607 if (ap->pflags & ATA_PFLAG_FROZEN) {
ba6a1308 1608 spin_unlock_irqrestore(ap->lock, flags);
e3180499
TH
1609 return AC_ERR_SYSTEM;
1610 }
1611
2ab7db1f 1612 /* initialize internal qc */
a2a7a662 1613
2ab7db1f
TH
1614 /* XXX: Tag 0 is used for drivers with legacy EH as some
1615 * drivers choke if any other tag is given. This breaks
1616 * ata_tag_internal() test for those drivers. Don't use new
1617 * EH stuff without converting to it.
1618 */
1619 if (ap->ops->error_handler)
1620 tag = ATA_TAG_INTERNAL;
1621 else
1622 tag = 0;
1623
6cec4a39 1624 if (test_and_set_bit(tag, &ap->qc_allocated))
2ab7db1f 1625 BUG();
f69499f4 1626 qc = __ata_qc_from_tag(ap, tag);
2ab7db1f
TH
1627
1628 qc->tag = tag;
1629 qc->scsicmd = NULL;
1630 qc->ap = ap;
1631 qc->dev = dev;
1632 ata_qc_reinit(qc);
1633
9af5c9c9
TH
1634 preempted_tag = link->active_tag;
1635 preempted_sactive = link->sactive;
dedaf2b0 1636 preempted_qc_active = ap->qc_active;
da917d69 1637 preempted_nr_active_links = ap->nr_active_links;
9af5c9c9
TH
1638 link->active_tag = ATA_TAG_POISON;
1639 link->sactive = 0;
dedaf2b0 1640 ap->qc_active = 0;
da917d69 1641 ap->nr_active_links = 0;
2ab7db1f
TH
1642
1643 /* prepare & issue qc */
a2a7a662 1644 qc->tf = *tf;
d69cf37d
TH
1645 if (cdb)
1646 memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
e61e0672 1647 qc->flags |= ATA_QCFLAG_RESULT_TF;
a2a7a662
TH
1648 qc->dma_dir = dma_dir;
1649 if (dma_dir != DMA_NONE) {
2432697b 1650 unsigned int i, buflen = 0;
87260216 1651 struct scatterlist *sg;
2432697b 1652
87260216
JA
1653 for_each_sg(sgl, sg, n_elem, i)
1654 buflen += sg->length;
2432697b 1655
87260216 1656 ata_sg_init(qc, sgl, n_elem);
49c80429 1657 qc->nbytes = buflen;
a2a7a662
TH
1658 }
1659
77853bf2 1660 qc->private_data = &wait;
a2a7a662
TH
1661 qc->complete_fn = ata_qc_complete_internal;
1662
8e0e694a 1663 ata_qc_issue(qc);
a2a7a662 1664
ba6a1308 1665 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662 1666
2b789108
TH
1667 if (!timeout)
1668 timeout = ata_probe_timeout * 1000 / HZ;
1669
1670 rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));
d95a717f
TH
1671
1672 ata_port_flush_task(ap);
41ade50c 1673
d95a717f 1674 if (!rc) {
ba6a1308 1675 spin_lock_irqsave(ap->lock, flags);
a2a7a662
TH
1676
1677 /* We're racing with irq here. If we lose, the
1678 * following test prevents us from completing the qc
d95a717f
TH
1679 * twice. If we win, the port is frozen and will be
1680 * cleaned up by ->post_internal_cmd().
a2a7a662 1681 */
77853bf2 1682 if (qc->flags & ATA_QCFLAG_ACTIVE) {
d95a717f
TH
1683 qc->err_mask |= AC_ERR_TIMEOUT;
1684
1685 if (ap->ops->error_handler)
1686 ata_port_freeze(ap);
1687 else
1688 ata_qc_complete(qc);
f15a1daf 1689
0dd4b21f
BP
1690 if (ata_msg_warn(ap))
1691 ata_dev_printk(dev, KERN_WARNING,
88574551 1692 "qc timeout (cmd 0x%x)\n", command);
a2a7a662
TH
1693 }
1694
ba6a1308 1695 spin_unlock_irqrestore(ap->lock, flags);
a2a7a662
TH
1696 }
1697
d95a717f
TH
1698 /* do post_internal_cmd */
1699 if (ap->ops->post_internal_cmd)
1700 ap->ops->post_internal_cmd(qc);
1701
a51d644a
TH
1702 /* perform minimal error analysis */
1703 if (qc->flags & ATA_QCFLAG_FAILED) {
1704 if (qc->result_tf.command & (ATA_ERR | ATA_DF))
1705 qc->err_mask |= AC_ERR_DEV;
1706
1707 if (!qc->err_mask)
1708 qc->err_mask |= AC_ERR_OTHER;
1709
1710 if (qc->err_mask & ~AC_ERR_OTHER)
1711 qc->err_mask &= ~AC_ERR_OTHER;
d95a717f
TH
1712 }
1713
15869303 1714 /* finish up */
ba6a1308 1715 spin_lock_irqsave(ap->lock, flags);
15869303 1716
e61e0672 1717 *tf = qc->result_tf;
77853bf2
TH
1718 err_mask = qc->err_mask;
1719
1720 ata_qc_free(qc);
9af5c9c9
TH
1721 link->active_tag = preempted_tag;
1722 link->sactive = preempted_sactive;
dedaf2b0 1723 ap->qc_active = preempted_qc_active;
da917d69 1724 ap->nr_active_links = preempted_nr_active_links;
77853bf2 1725
1f7dd3e9
TH
1726 /* XXX - Some LLDDs (sata_mv) disable port on command failure.
1727 * Until those drivers are fixed, we detect the condition
1728 * here, fail the command with AC_ERR_SYSTEM and reenable the
1729 * port.
1730 *
1731 * Note that this doesn't change any behavior as internal
1732 * command failure results in disabling the device in the
1733 * higher layer for LLDDs without new reset/EH callbacks.
1734 *
1735 * Kill the following code as soon as those drivers are fixed.
1736 */
198e0fed 1737 if (ap->flags & ATA_FLAG_DISABLED) {
1f7dd3e9
TH
1738 err_mask |= AC_ERR_SYSTEM;
1739 ata_port_probe(ap);
1740 }
1741
ba6a1308 1742 spin_unlock_irqrestore(ap->lock, flags);
15869303 1743
77853bf2 1744 return err_mask;
a2a7a662
TH
1745}
1746
2432697b 1747/**
33480a0e 1748 * ata_exec_internal - execute libata internal command
2432697b
TH
1749 * @dev: Device to which the command is sent
1750 * @tf: Taskfile registers for the command and the result
1751 * @cdb: CDB for packet command
1752 * @dma_dir: Data tranfer direction of the command
1753 * @buf: Data buffer of the command
1754 * @buflen: Length of data buffer
2b789108 1755 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1756 *
1757 * Wrapper around ata_exec_internal_sg() which takes simple
1758 * buffer instead of sg list.
1759 *
1760 * LOCKING:
1761 * None. Should be called with kernel context, might sleep.
1762 *
1763 * RETURNS:
1764 * Zero on success, AC_ERR_* mask on failure
1765 */
1766unsigned ata_exec_internal(struct ata_device *dev,
1767 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1768 int dma_dir, void *buf, unsigned int buflen,
1769 unsigned long timeout)
2432697b 1770{
33480a0e
TH
1771 struct scatterlist *psg = NULL, sg;
1772 unsigned int n_elem = 0;
2432697b 1773
33480a0e
TH
1774 if (dma_dir != DMA_NONE) {
1775 WARN_ON(!buf);
1776 sg_init_one(&sg, buf, buflen);
1777 psg = &sg;
1778 n_elem++;
1779 }
2432697b 1780
2b789108
TH
1781 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1782 timeout);
2432697b
TH
1783}
1784
977e6b9f
TH
1785/**
1786 * ata_do_simple_cmd - execute simple internal command
1787 * @dev: Device to which the command is sent
1788 * @cmd: Opcode to execute
1789 *
1790 * Execute a 'simple' command, that only consists of the opcode
1791 * 'cmd' itself, without filling any other registers
1792 *
1793 * LOCKING:
1794 * Kernel thread context (may sleep).
1795 *
1796 * RETURNS:
1797 * Zero on success, AC_ERR_* mask on failure
e58eb583 1798 */
77b08fb5 1799unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1800{
1801 struct ata_taskfile tf;
e58eb583
TH
1802
1803 ata_tf_init(dev, &tf);
1804
1805 tf.command = cmd;
1806 tf.flags |= ATA_TFLAG_DEVICE;
1807 tf.protocol = ATA_PROT_NODATA;
1808
2b789108 1809 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1810}
1811
1bc4ccff
AC
1812/**
1813 * ata_pio_need_iordy - check if iordy needed
1814 * @adev: ATA device
1815 *
1816 * Check if the current speed of the device requires IORDY. Used
1817 * by various controllers for chip configuration.
1818 */
a617c09f 1819
1bc4ccff
AC
1820unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1821{
432729f0
AC
1822 /* Controller doesn't support IORDY. Probably a pointless check
1823 as the caller should know this */
9af5c9c9 1824 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1825 return 0;
432729f0
AC
1826 /* PIO3 and higher it is mandatory */
1827 if (adev->pio_mode > XFER_PIO_2)
1828 return 1;
1829 /* We turn it on when possible */
1830 if (ata_id_has_iordy(adev->id))
1bc4ccff 1831 return 1;
432729f0
AC
1832 return 0;
1833}
2e9edbf8 1834
432729f0
AC
1835/**
1836 * ata_pio_mask_no_iordy - Return the non IORDY mask
1837 * @adev: ATA device
1838 *
1839 * Compute the highest mode possible if we are not using iordy. Return
1840 * -1 if no iordy mode is available.
1841 */
a617c09f 1842
432729f0
AC
1843static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1844{
1bc4ccff 1845 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1846 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1847 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1848 /* Is the speed faster than the drive allows non IORDY ? */
1849 if (pio) {
1850 /* This is cycle times not frequency - watch the logic! */
1851 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1852 return 3 << ATA_SHIFT_PIO;
1853 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1854 }
1855 }
432729f0 1856 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1857}
1858
1da177e4 1859/**
49016aca 1860 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1861 * @dev: target device
1862 * @p_class: pointer to class of the target device (may be changed)
bff04647 1863 * @flags: ATA_READID_* flags
fe635c7e 1864 * @id: buffer to read IDENTIFY data into
1da177e4 1865 *
49016aca
TH
1866 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1867 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1868 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1869 * for pre-ATA4 drives.
1da177e4 1870 *
50a99018 1871 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
2dcb407e 1872 * now we abort if we hit that case.
50a99018 1873 *
1da177e4 1874 * LOCKING:
49016aca
TH
1875 * Kernel thread context (may sleep)
1876 *
1877 * RETURNS:
1878 * 0 on success, -errno otherwise.
1da177e4 1879 */
a9beec95 1880int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
bff04647 1881 unsigned int flags, u16 *id)
1da177e4 1882{
9af5c9c9 1883 struct ata_port *ap = dev->link->ap;
49016aca 1884 unsigned int class = *p_class;
a0123703 1885 struct ata_taskfile tf;
49016aca
TH
1886 unsigned int err_mask = 0;
1887 const char *reason;
54936f8b 1888 int may_fallback = 1, tried_spinup = 0;
49016aca 1889 int rc;
1da177e4 1890
0dd4b21f 1891 if (ata_msg_ctl(ap))
44877b4e 1892 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1893
49016aca 1894 ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
49016aca 1895 retry:
3373efd8 1896 ata_tf_init(dev, &tf);
a0123703 1897
49016aca
TH
1898 switch (class) {
1899 case ATA_DEV_ATA:
a0123703 1900 tf.command = ATA_CMD_ID_ATA;
49016aca
TH
1901 break;
1902 case ATA_DEV_ATAPI:
a0123703 1903 tf.command = ATA_CMD_ID_ATAPI;
49016aca
TH
1904 break;
1905 default:
1906 rc = -ENODEV;
1907 reason = "unsupported class";
1908 goto err_out;
1da177e4
LT
1909 }
1910
a0123703 1911 tf.protocol = ATA_PROT_PIO;
81afe893
TH
1912
1913 /* Some devices choke if TF registers contain garbage. Make
1914 * sure those are properly initialized.
1915 */
1916 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
1917
1918 /* Device presence detection is unreliable on some
1919 * controllers. Always poll IDENTIFY if available.
1920 */
1921 tf.flags |= ATA_TFLAG_POLLING;
1da177e4 1922
3373efd8 1923 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
2b789108 1924 id, sizeof(id[0]) * ATA_ID_WORDS, 0);
a0123703 1925 if (err_mask) {
800b3996 1926 if (err_mask & AC_ERR_NODEV_HINT) {
55a8e2c8 1927 DPRINTK("ata%u.%d: NODEV after polling detection\n",
44877b4e 1928 ap->print_id, dev->devno);
55a8e2c8
TH
1929 return -ENOENT;
1930 }
1931
54936f8b
TH
1932 /* Device or controller might have reported the wrong
1933 * device class. Give a shot at the other IDENTIFY if
1934 * the current one is aborted by the device.
1935 */
1936 if (may_fallback &&
1937 (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
1938 may_fallback = 0;
1939
1940 if (class == ATA_DEV_ATA)
1941 class = ATA_DEV_ATAPI;
1942 else
1943 class = ATA_DEV_ATA;
1944 goto retry;
1945 }
1946
49016aca
TH
1947 rc = -EIO;
1948 reason = "I/O error";
1da177e4
LT
1949 goto err_out;
1950 }
1951
54936f8b
TH
1952 /* Falling back doesn't make sense if ID data was read
1953 * successfully at least once.
1954 */
1955 may_fallback = 0;
1956
49016aca 1957 swap_buf_le16(id, ATA_ID_WORDS);
1da177e4 1958
49016aca 1959 /* sanity check */
a4f5749b 1960 rc = -EINVAL;
6070068b 1961 reason = "device reports invalid type";
a4f5749b
TH
1962
1963 if (class == ATA_DEV_ATA) {
1964 if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
1965 goto err_out;
1966 } else {
1967 if (ata_id_is_ata(id))
1968 goto err_out;
49016aca
TH
1969 }
1970
169439c2
ML
1971 if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
1972 tried_spinup = 1;
1973 /*
1974 * Drive powered-up in standby mode, and requires a specific
1975 * SET_FEATURES spin-up subcommand before it will accept
1976 * anything other than the original IDENTIFY command.
1977 */
218f3d30 1978 err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
fb0582f9 1979 if (err_mask && id[2] != 0x738c) {
169439c2
ML
1980 rc = -EIO;
1981 reason = "SPINUP failed";
1982 goto err_out;
1983 }
1984 /*
1985 * If the drive initially returned incomplete IDENTIFY info,
1986 * we now must reissue the IDENTIFY command.
1987 */
1988 if (id[2] == 0x37c8)
1989 goto retry;
1990 }
1991
bff04647 1992 if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
49016aca
TH
1993 /*
1994 * The exact sequence expected by certain pre-ATA4 drives is:
1995 * SRST RESET
50a99018
AC
1996 * IDENTIFY (optional in early ATA)
1997 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
49016aca
TH
1998 * anything else..
1999 * Some drives were very specific about that exact sequence.
50a99018
AC
2000 *
2001 * Note that ATA4 says lba is mandatory so the second check
2002 * shoud never trigger.
49016aca
TH
2003 */
2004 if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
3373efd8 2005 err_mask = ata_dev_init_params(dev, id[3], id[6]);
49016aca
TH
2006 if (err_mask) {
2007 rc = -EIO;
2008 reason = "INIT_DEV_PARAMS failed";
2009 goto err_out;
2010 }
2011
2012 /* current CHS translation info (id[53-58]) might be
2013 * changed. reread the identify device info.
2014 */
bff04647 2015 flags &= ~ATA_READID_POSTRESET;
49016aca
TH
2016 goto retry;
2017 }
2018 }
2019
2020 *p_class = class;
fe635c7e 2021
49016aca
TH
2022 return 0;
2023
2024 err_out:
88574551 2025 if (ata_msg_warn(ap))
0dd4b21f 2026 ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
88574551 2027 "(%s, err_mask=0x%x)\n", reason, err_mask);
49016aca
TH
2028 return rc;
2029}
2030
3373efd8 2031static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2032{
9af5c9c9
TH
2033 struct ata_port *ap = dev->link->ap;
2034 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2035}
2036
a6e6ce8e
TH
2037static void ata_dev_config_ncq(struct ata_device *dev,
2038 char *desc, size_t desc_sz)
2039{
9af5c9c9 2040 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
2041 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
2042
2043 if (!ata_id_has_ncq(dev->id)) {
2044 desc[0] = '\0';
2045 return;
2046 }
75683fe7 2047 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
2048 snprintf(desc, desc_sz, "NCQ (not used)");
2049 return;
2050 }
a6e6ce8e 2051 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 2052 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
2053 dev->flags |= ATA_DFLAG_NCQ;
2054 }
2055
2056 if (hdepth >= ddepth)
2057 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
2058 else
2059 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
2060}
2061
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	/* nothing to do for a disabled device */
	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* massage HPA, do it early as it might change IDENTIFY data */
	rc = ata_hpa_resize(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		} else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* multi-sector transfer count, valid only when bit 8 set */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		/* fixed 16-byte CDB length for ATA devices —
		 * NOTE(review): presumably for the SCSI translation layer;
		 * confirm against libata-scsi.c */
		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		const char *cdb_intr_string = "";
		const char *atapi_an_string = "";
		u32 sntf;

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/* Enable ATAPI AN if both the host and device have
		 * the support.  If PMP is attached, SNTF is required
		 * to enable ATAPI AN to discern between PHY status
		 * changed notifications and ATAPI ANs.
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
		    (!ap->nr_pmp_links ||
		     sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
			unsigned int err_mask;

			/* issue SET feature command to turn this on */
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_AN);
			if (err_mask)
				ata_dev_printk(dev, KERN_ERR,
					"failed to enable ATAPI AN "
					"(err_mask=0x%x)\n", err_mask);
			else {
				dev->flags |= ATA_DFLAG_AN;
				atapi_an_string = ", ATAPI AN";
			}
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string, atapi_an_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	/* advertise link power management capabilities unless blacklisted */
	if (!(dev->horkage & ATA_HORKAGE_IPM)) {
		if (ata_id_has_hipm(dev->id))
			dev->flags |= ATA_DFLAG_HIPM;
		if (ata_id_has_dipm(dev->id))
			dev->flags |= ATA_DFLAG_DIPM;
	}

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if ((dev->class == ATA_DEV_ATAPI) &&
	    (atapi_command_packet_set(id) == TYPE_TAPE))
		dev->max_sectors = ATA_MAX_SECTORS_TAPE;

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
		dev->horkage |= ATA_HORKAGE_IPM;

		/* reset link pm_policy for this port to no pm */
		ap->pm_policy = MAX_PERFORMANCE;
	}

	/* give the LLD one last chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			__FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2336
/**
 *	ata_cable_40wire	-	return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA40 unconditionally.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}
2349
/**
 *	ata_cable_80wire	-	return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA80 unconditionally.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}
2362
/**
 *	ata_cable_unknown	-	return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 *
 *	RETURNS:
 *	ATA_CBL_PATA_UNK unconditionally.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}
2374
/**
 *	ata_cable_sata	-	return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables.
 *
 *	RETURNS:
 *	ATA_CBL_SATA unconditionally.
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
2386
/**
 *	ata_bus_probe	-	Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	/* each device gets a fixed budget of probe attempts */
	ata_link_for_each_dev(dev, &ap->link)
		tries[dev->devno] = ATA_PROBE_MAX_TRIES;

 retry:
	ata_link_for_each_dev(dev, &ap->link) {
		/* If we issue an SRST then an ATA drive (not ATAPI)
		 * may change configuration and be in PIO0 timing. If
		 * we do a hard reset (or are coming from power on)
		 * this is true for ATA or ATAPI. Until we've set a
		 * suitable controller mode we should not touch the
		 * bus as we may be talking too fast.
		 */
		dev->pio_mode = XFER_PIO_0;

		/* If the controller has a pio mode setup function
		 * then use it to set the chipset to rights. Don't
		 * touch the DMA setup as that will be dealt with when
		 * configuring devices.
		 */
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	/* snapshot the classes the reset reported, then clear them so
	 * that IDENTIFY decides the final classification */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	ata_link_for_each_dev(dev, &ap->link) {
		if (tries[dev->devno])
			dev->class = classes[dev->devno];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* We may have SATA bridge glue hiding here irrespective of the
	   reported cable types and sensed types */
	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;
		/* SATA drives indicate we have a bridge. We don't know which
		   end of the link the bridge is which is a problem */
		if (ata_id_is_sata(dev->id))
			ap->cbl = ATA_CBL_SATA;
	}

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	ata_link_for_each_dev(dev, &ap->link) {
		if (!ata_dev_enabled(dev))
			continue;

		ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(&ap->link, &dev);
	if (rc)
		goto fail;

	/* success if at least one device survived the probe */
	ata_link_for_each_dev(dev, &ap->link)
		if (ata_dev_enabled(dev))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	return -ENODEV;

 fail:
	/* "dev" is the device that failed above */
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
		/* fallthrough */
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(&ap->link);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}
2535
/**
 *	ata_port_probe	-	Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	/* clearing the flag is all "enabled" means here */
	ap->flags &= ~ATA_FLAG_DISABLED;
}
2551
3be680b7
TH
2552/**
2553 * sata_print_link_status - Print SATA link status
936fd732 2554 * @link: SATA link to printk link status about
3be680b7
TH
2555 *
2556 * This function prints link speed and status of a SATA link.
2557 *
2558 * LOCKING:
2559 * None.
2560 */
936fd732 2561void sata_print_link_status(struct ata_link *link)
3be680b7 2562{
6d5f9732 2563 u32 sstatus, scontrol, tmp;
3be680b7 2564
936fd732 2565 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2566 return;
936fd732 2567 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2568
936fd732 2569 if (ata_link_online(link)) {
3be680b7 2570 tmp = (sstatus >> 4) & 0xf;
936fd732 2571 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2572 "SATA link up %s (SStatus %X SControl %X)\n",
2573 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2574 } else {
936fd732 2575 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2576 "SATA link down (SStatus %X SControl %X)\n",
2577 sstatus, scontrol);
3be680b7
TH
2578 }
2579}
2580
/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	struct ata_link *link = &ap->link;
	unsigned long timeout = jiffies + (HZ * 5);	/* 5 second PHY-ready budget */
	u32 sstatus;

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(link, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(link, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary;
	 * DET value 1 means device presence detected but PHY not ready */
	do {
		msleep(200);
		sata_scr_read(link, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(link);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_link_offline(link))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	/* wait for the device behind the link to come out of BSY */
	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}
2636
/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	/* __sata_phy_reset() may disable the port; don't touch it then */
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}
2655
ebdfca6e
AC
2656/**
2657 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2658 * @adev: device
2659 *
2660 * Obtain the other device on the same cable, or if none is
2661 * present NULL is returned
2662 */
2e9edbf8 2663
3373efd8 2664struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2665{
9af5c9c9
TH
2666 struct ata_link *link = adev->link;
2667 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2668 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2669 return NULL;
2670 return pair;
2671}
2672
/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	/* mark both possible devices absent and flag the port disabled */
	ap->link.device[0].class = ATA_DEV_NONE;
	ap->link.device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}
2692
/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@link: Link to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @link downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_link *link)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	if (!sata_scr_valid(link))
		return -EOPNOTSUPP;

	/* If SCR can be read, use it to determine the current SPD.
	 * If not, use cached value in link->sata_spd.
	 */
	rc = sata_scr_read(link, SCR_STATUS, &sstatus);
	if (rc == 0)
		spd = (sstatus >> 4) & 0xf;	/* SStatus bits 7:4 */
	else
		spd = link->sata_spd;

	mask = link->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;

	/* unconditionally mask off the highest bit */
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	/* Mask off all speeds higher than or equal to the current
	 * one.  Force 1.5Gbps if current SPD is not available.
	 */
	if (spd > 1)
		mask &= (1 << (spd - 1)) - 1;
	else
		mask &= 1;

	/* were we already at the bottom? */
	if (!mask)
		return -EINVAL;

	link->sata_spd_limit = mask;

	ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
2751
936fd732 2752static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2753{
5270222f
TH
2754 struct ata_link *host_link = &link->ap->link;
2755 u32 limit, target, spd;
1c3fae4d 2756
5270222f
TH
2757 limit = link->sata_spd_limit;
2758
2759 /* Don't configure downstream link faster than upstream link.
2760 * It doesn't speed up anything and some PMPs choke on such
2761 * configuration.
2762 */
2763 if (!ata_is_host_link(link) && host_link->sata_spd)
2764 limit &= (1 << host_link->sata_spd) - 1;
2765
2766 if (limit == UINT_MAX)
2767 target = 0;
1c3fae4d 2768 else
5270222f 2769 target = fls(limit);
1c3fae4d
TH
2770
2771 spd = (*scontrol >> 4) & 0xf;
5270222f 2772 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2773
5270222f 2774 return spd != target;
1c3fae4d
TH
2775}
2776
/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@link: Link in question
 *
 *	Test whether the spd limit in SControl matches
 *	@link->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_link *link)
{
	u32 scontrol;

	/* if SControl can't be read, assume a reset is needed */
	if (sata_scr_read(link, SCR_CONTROL, &scontrol))
		return 1;

	return __sata_set_spd_needed(link, &scontrol);
}
2801
/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@link: Link to set SATA spd for
 *
 *	Set SATA spd of @link according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_link *link)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* __sata_set_spd_needed() updates scontrol in place */
	if (!__sata_set_spd_needed(link, &scontrol))
		return 0;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
2831
/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

/* Initializer order follows the field order of struct ata_timing;
 * the table is terminated by the mode == 0xFF sentinel entry. */
static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
	{ XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
	{ XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
	{ XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },

	{ XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
	{ XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
	{ XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
	{ XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
	{ XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },

/*	{ XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */

	{ XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
	{ XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
	{ XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },

	{ XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
	{ XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
	{ XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },

	{ XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
	{ XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
	{ XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
	{ XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },

	{ XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
	{ XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
	{ XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */

	{ 0xFF }
};

/* ENOUGH: ceiling division — smallest count of @unit periods >= v */
#define ENOUGH(v, unit)		(((v)-1)/(unit)+1)
/* EZ: like ENOUGH but maps 0 to 0 ("not specified" stays unspecified) */
#define EZ(v, unit)		((v)?ENOUGH(v, unit):0)
/* Convert nanosecond timings in @t to bus clock counts in @q using a
 * cycle time of @T picoseconds (@UT picoseconds for the UDMA field).
 * @t and @q may alias; every field is read before it is written. */
static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup	= EZ(t->setup	* 1000,  T);
	q->act8b	= EZ(t->act8b	* 1000,  T);
	q->rec8b	= EZ(t->rec8b	* 1000,  T);
	q->cyc8b	= EZ(t->cyc8b	* 1000,  T);
	q->active	= EZ(t->active	* 1000,  T);
	q->recover	= EZ(t->recover	* 1000,  T);
	q->cycle	= EZ(t->cycle	* 1000,  T);
	q->udma		= EZ(t->udma	* 1000, UT);
}
2895
/* Merge timings @a and @b into @m, taking the slower (larger) value of
 * each field selected by the ATA_TIMING_* bits in @what.  @m may alias
 * either input; unselected fields of @m are left untouched. */
void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}
2908
2dcb407e 2909static const struct ata_timing *ata_timing_find_mode(unsigned short speed)
452503f9
AC
2910{
2911 const struct ata_timing *t;
2912
2913 for (t = ata_timing; t->mode != speed; t++)
91190758 2914 if (t->mode == 0xFF)
452503f9 2915 return NULL;
2e9edbf8 2916 return t;
452503f9
AC
2917}
2918
/* Compute the quantized bus timing for @adev at transfer mode @speed
 * into @t, using bus clock period @T (and @UT for UDMA) in picoseconds.
 * Merges in EIDE drive-reported minimum cycle times and clamps DMA
 * timing to be no faster than the device's PIO timing.
 * Returns 0 on success, -EINVAL if @speed is unknown. */
int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
					    else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T * and some other commands. We have to ensure that the
	 * DMA cycle timing is slower/equal than the fastest PIO timing.
	 */

	if (speed > XFER_PIO_6) {
		/* recurse to get the device's current PIO timing and
		 * merge it in, so PIO commands stay safe in DMA modes */
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
2989
/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	/* ATA_DNXFER_QUIET is OR'd into the selector; peel it off first */
	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		/* drop the current highest PIO mode */
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		/* drop the highest DMA mode; UDMA takes precedence over
		 * MWDMA.  Refuse to leave the device with no DMA mode at
		 * all (caller would then have to fall back to PIO).
		 */
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		/* limit UDMA to modes legal on a 40-conductor cable */
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
		/* fall through - FORCE_PIO0 also clears all DMA modes */
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	/* fail if we lost PIO entirely or nothing actually changed */
	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
3078
/**
 *	ata_dev_set_mode - issue SET FEATURES - XFER MODE and revalidate
 *	@dev: device whose pre-computed xfer_shift/pio_mode/dma_mode to apply
 *
 *	Issue SET FEATURES - XFER MODE to @dev, filter out known-bogus
 *	device errors for quirky hardware, then revalidate the device.
 *
 *	LOCKING:
 *	Inherited from caller (EH context; may sleep).
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise.
 */
static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	unsigned int err_mask;
	int rc;

	/* ATA_DFLAG_PIO tracks whether the final xfer mode is PIO */
	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);

	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	/* Some very old devices and some bad newer ones fail any kind of
	   SET_XFERMODE request but support PIO0-2 timings and no IORDY */
	if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
	    dev->pio_mode <= XFER_PIO_2)
		err_mask &= ~AC_ERR_DEV;

	/* Early MWDMA devices do DMA but don't allow DMA mode setting.
	   Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0
	   (IDENTIFY word 63, "selected" bit in the high byte) */
	if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
	    dev->dma_mode == XFER_MW_DMA_0 &&
	    (dev->id[63] >> 8) & 1)
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	/* re-read IDENTIFY data with POST_SETMODE set so revalidation
	 * knows a transfer-mode change just happened */
	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}
3127
1da177e4 3128/**
04351821 3129 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3130 * @link: link on which timings will be programmed
e82cbdb9 3131 * @r_failed_dev: out paramter for failed device
1da177e4 3132 *
04351821
AC
3133 * Standard implementation of the function used to tune and set
3134 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3135 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3136 * returned in @r_failed_dev.
780a87f7 3137 *
1da177e4 3138 * LOCKING:
0cba632b 3139 * PCI/etc. bus probe sem.
e82cbdb9
TH
3140 *
3141 * RETURNS:
3142 * 0 on success, negative errno otherwise
1da177e4 3143 */
04351821 3144
0260731f 3145int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3146{
0260731f 3147 struct ata_port *ap = link->ap;
e8e0619f 3148 struct ata_device *dev;
f58229f8 3149 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3150
a6d5a51c 3151 /* step 1: calculate xfer_mask */
f58229f8 3152 ata_link_for_each_dev(dev, link) {
acf356b1 3153 unsigned int pio_mask, dma_mask;
b3a70601 3154 unsigned int mode_mask;
a6d5a51c 3155
e1211e3f 3156 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3157 continue;
3158
b3a70601
AC
3159 mode_mask = ATA_DMA_MASK_ATA;
3160 if (dev->class == ATA_DEV_ATAPI)
3161 mode_mask = ATA_DMA_MASK_ATAPI;
3162 else if (ata_id_is_cfa(dev->id))
3163 mode_mask = ATA_DMA_MASK_CFA;
3164
3373efd8 3165 ata_dev_xfermask(dev);
1da177e4 3166
acf356b1
TH
3167 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3168 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3169
3170 if (libata_dma_mask & mode_mask)
3171 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3172 else
3173 dma_mask = 0;
3174
acf356b1
TH
3175 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3176 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3177
4f65977d 3178 found = 1;
5444a6f4
AC
3179 if (dev->dma_mode)
3180 used_dma = 1;
a6d5a51c 3181 }
4f65977d 3182 if (!found)
e82cbdb9 3183 goto out;
a6d5a51c
TH
3184
3185 /* step 2: always set host PIO timings */
f58229f8 3186 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3187 if (!ata_dev_enabled(dev))
3188 continue;
3189
3190 if (!dev->pio_mode) {
f15a1daf 3191 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3192 rc = -EINVAL;
e82cbdb9 3193 goto out;
e8e0619f
TH
3194 }
3195
3196 dev->xfer_mode = dev->pio_mode;
3197 dev->xfer_shift = ATA_SHIFT_PIO;
3198 if (ap->ops->set_piomode)
3199 ap->ops->set_piomode(ap, dev);
3200 }
1da177e4 3201
a6d5a51c 3202 /* step 3: set host DMA timings */
f58229f8 3203 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3204 if (!ata_dev_enabled(dev) || !dev->dma_mode)
3205 continue;
3206
3207 dev->xfer_mode = dev->dma_mode;
3208 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3209 if (ap->ops->set_dmamode)
3210 ap->ops->set_dmamode(ap, dev);
3211 }
1da177e4
LT
3212
3213 /* step 4: update devices' xfer mode */
f58229f8 3214 ata_link_for_each_dev(dev, link) {
18d90deb 3215 /* don't update suspended devices' xfer mode */
9666f400 3216 if (!ata_dev_enabled(dev))
83206a29
TH
3217 continue;
3218
3373efd8 3219 rc = ata_dev_set_mode(dev);
5bbc53f4 3220 if (rc)
e82cbdb9 3221 goto out;
83206a29 3222 }
1da177e4 3223
e8e0619f
TH
3224 /* Record simplex status. If we selected DMA then the other
3225 * host channels are not permitted to do so.
5444a6f4 3226 */
cca3974e 3227 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3228 ap->host->simplex_claimed = ap;
5444a6f4 3229
e82cbdb9
TH
3230 out:
3231 if (rc)
3232 *r_failed_dev = dev;
3233 return rc;
1da177e4
LT
3234}
3235
04351821
AC
3236/**
3237 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3238 * @link: link on which timings will be programmed
04351821
AC
3239 * @r_failed_dev: out paramter for failed device
3240 *
3241 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3242 * ata_set_mode() fails, pointer to the failing device is
3243 * returned in @r_failed_dev.
3244 *
3245 * LOCKING:
3246 * PCI/etc. bus probe sem.
3247 *
3248 * RETURNS:
3249 * 0 on success, negative errno otherwise
3250 */
0260731f 3251int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 3252{
0260731f
TH
3253 struct ata_port *ap = link->ap;
3254
04351821
AC
3255 /* has private set_mode? */
3256 if (ap->ops->set_mode)
0260731f
TH
3257 return ap->ops->set_mode(link, r_failed_dev);
3258 return ata_do_set_mode(link, r_failed_dev);
04351821
AC
3259}
3260
/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	/* load the taskfile registers first, then write the command
	 * register - the order is mandated by the ATA protocol */
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}
3280
/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout (jiffies) before whining
 *	@tmout: overall timeout (jiffies)
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.  A status of 0xff is treated as
 *	"no device present" (floating bus).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	/* quick initial poll before falling back to msleep loops */
	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	/* still busy after the "patience" window - warn but keep waiting */
	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	/* 0xff means the bus is floating - no device */
	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}
3335
/**
 *	ata_wait_after_reset - wait before checking status after reset
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	After reset, we need to pause a while before reading status.
 *	Also, certain combination of controller and device report 0xff
 *	for some duration (e.g. until SATA PHY is up and running)
 *	which is interpreted as empty port in ATA world.  This
 *	function also waits for such devices to get out of 0xff
 *	status.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
{
	/* never wait past ATA_TMOUT_FF_WAIT, even if @deadline allows it */
	unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;

	if (time_before(until, deadline))
		deadline = until;

	/* Spec mandates ">= 2ms" before checking status.  We wait
	 * 150ms, because that was the magic delay used for ATAPI
	 * devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Wait for 0xff to clear.  Some SATA devices take a long time
	 * to clear 0xff after reset.  For example, HHD424020F7SV00
	 * iVDR needs >= 800ms while.  Quantum GoVault needs even more
	 * than that.
	 */
	while (1) {
		u8 status = ata_chk_status(ap);

		if (status != 0xff || time_after(jiffies, deadline))
			return;

		msleep(50);
	}
}
3384
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* offline link + floating (0xff) status: nothing there */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if we've waited >5s and >3s still remain */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3426
/*
 * ata_bus_post_reset - wait for devices to become ready after bus reset
 * @ap: port that was just reset
 * @devmask: mask of devices found by ata_devchk (bit 0 = dev0, bit 1 = dev1)
 * @deadline: deadline jiffies for the operation
 *
 * Waits for each present device to clear BSY, then re-selects device 0.
 * Returns 0 on success; -ENODEV if a device disappeared (reported via
 * @ret so the other device is still waited for); other -errno on
 * hard failure.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			/* -ENODEV is recorded but not fatal here */
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			/* post-reset signature: nsect == lbal == 1 */
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3486
/*
 * ata_bus_softreset - issue ATA SRST on the bus
 * @ap: port to reset
 * @devmask: mask of devices expected present (passed to post-reset wait)
 * @deadline: deadline jiffies for the operation
 *
 * Pulses SRST in the device control register, waits, then hands off
 * to ata_bus_post_reset().  Returns 0 on success, -ENODEV if the bus
 * floats at 0xFF, other -errno on failure.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_chk_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3513
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present; SATA_RESET ports just
	 * assume dev0 exists */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; -ENODEV is tolerated (empty port) */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
	/* err == 0x81 means dev1 failed diagnostics - don't classify it */
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(&device[1], dev1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3601
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this functions waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline = min(params timeout, @deadline) */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field matters */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET==1 may be a stuck value - keep polling
			 * until the deadline before accepting it */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3670
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* DET = 0 (no action), IPM = 3 (disable partial/slumber) */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3706
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: some links need a hardreset to resume */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* Some PMPs don't work with only SRST, force hardreset if PMP
	 * is supported.
	 */
	if (ap->flags & ATA_FLAG_PMP)
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		/* not ready and not gone: escalate to hardreset */
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3769
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing to reset, report empty */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(&link->device[0],
					  devmask & (1 << 0), &err);
	/* err == 0x81 means dev1 failed diagnostics - skip classification */
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(&link->device[1],
						  devmask & (1 << 1), &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3829
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable the phy while changing speed */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	/* DET = 1: perform interface communication initialization */
	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3889
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status */
	ata_wait_after_reset(ap, deadline);

	/* If PMP is supported, we have to do follow-up SRST.  Note
	 * that some PMPs don't send D2H Reg FIS after hardreset at
	 * all if the first port is empty.  Wait for it just for a
	 * second and request follow-up SRST.
	 */
	if (ap->flags & ATA_FLAG_PMP) {
		ata_wait_ready(ap, jiffies + HZ);
		return -EAGAIN;
	}

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(link->device, 1, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3957
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError (write-1-to-clear) */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
4002
623a3128
TH
4003/**
4004 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
4005 * @dev: device to compare against
4006 * @new_class: class of the new device
4007 * @new_id: IDENTIFY page of the new device
4008 *
4009 * Compare @new_class and @new_id against @dev and determine
4010 * whether @dev is the device indicated by @new_class and
4011 * @new_id.
4012 *
4013 * LOCKING:
4014 * None.
4015 *
4016 * RETURNS:
4017 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
4018 */
3373efd8
TH
4019static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
4020 const u16 *new_id)
623a3128
TH
4021{
4022 const u16 *old_id = dev->id;
a0cf733b
TH
4023 unsigned char model[2][ATA_ID_PROD_LEN + 1];
4024 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
4025
4026 if (dev->class != new_class) {
f15a1daf
TH
4027 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
4028 dev->class, new_class);
623a3128
TH
4029 return 0;
4030 }
4031
a0cf733b
TH
4032 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
4033 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
4034 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
4035 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
4036
4037 if (strcmp(model[0], model[1])) {
f15a1daf
TH
4038 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
4039 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
4040 return 0;
4041 }
4042
4043 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
4044 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
4045 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
4046 return 0;
4047 }
4048
623a3128
TH
4049 return 1;
4050}
4051
4052/**
fe30911b 4053 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 4054 * @dev: target ATA device
bff04647 4055 * @readid_flags: read ID flags
623a3128
TH
4056 *
4057 * Re-read IDENTIFY page and make sure @dev is still attached to
4058 * the port.
4059 *
4060 * LOCKING:
4061 * Kernel thread context (may sleep)
4062 *
4063 * RETURNS:
4064 * 0 on success, negative errno otherwise
4065 */
fe30911b 4066int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 4067{
5eb45c02 4068 unsigned int class = dev->class;
9af5c9c9 4069 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
4070 int rc;
4071
fe635c7e 4072 /* read ID data */
bff04647 4073 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 4074 if (rc)
fe30911b 4075 return rc;
623a3128
TH
4076
4077 /* is the device still there? */
fe30911b
TH
4078 if (!ata_dev_same_device(dev, class, id))
4079 return -ENODEV;
623a3128 4080
fe635c7e 4081 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
4082 return 0;
4083}
4084
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@new_class: new class code
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
		       unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;	/* snapshot to detect capacity change */
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
	if (ata_class_enabled(new_class) &&
	    new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
			       dev->class, new_class);
		rc = -ENODEV;
		goto fail;
	}

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
4149
/* A blacklist entry: match by model number, optionally by firmware
 * revision as well (NULL model_rev matches any revision).  Both fields
 * support a single trailing '*' wildcard (see strn_pattern_cmp()).
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;	/* ATA_HORKAGE_* flags to apply */
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124", "N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	/* Odd clown on sil3726/4726 PMPs */
	{ "Config  Disk",	NULL,		ATA_HORKAGE_NODMA |
						ATA_HORKAGE_SKIP_PM },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor *",		"BANC*",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G*", NULL,    ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7225SBSUN250G*", NULL,    ATA_HORKAGE_NONCQ },
	{ "ST380817AS",		"3.42",		ATA_HORKAGE_NONCQ },

	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",    "MB3OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",    "MB4OC60D",     ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",    "MBZOC60D",     ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "HDT722516DLA380",	"V43OA96A",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS542525K9SA00", "BBFOC31P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "WDC WD3200AAJS-00RYA0", "12.01B01",	ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
	{ "ST9120822AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.ALD",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CCD",	ATA_HORKAGE_NONCQ, },
	{ "ST3160812AS",	"3.ADJ",	ATA_HORKAGE_NONCQ, },
	{ "ST980813AS",		"3.ADB",	ATA_HORKAGE_NONCQ, },
	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },
	{ "Maxtor 7V300F0",	"VA111900",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* Devices which report 1 sector over size HPA */
	{ "ST340823A",		NULL,		ATA_HORKAGE_HPA_SIZE, },
	{ "ST320413A",		NULL,		ATA_HORKAGE_HPA_SIZE, },

	/* End Marker */
	{ }
};
2e9edbf8 4245
/* Compare @name against pattern @patt.  @patt may end in a single
 * @wildchar ("*"), in which case only the prefix before the wildcard
 * is compared.  Returns 0 on match, non-zero otherwise (strncmp
 * semantics).  An empty @name matches only an empty @patt.
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wc;
	size_t cmp_len;

	/* trailing wildcard "*\0"? then compare only the prefix */
	wc = strchr(patt, wildchar);
	if (wc != NULL && wc[1] == '\0') {
		cmp_len = wc - patt;
	} else {
		cmp_len = strlen(name);
		if (cmp_len == 0)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, cmp_len);
}
4268
75683fe7 4269static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4270{
8bfa79fc
TH
4271 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4272 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4273 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4274
8bfa79fc
TH
4275 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4276 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4277
6919a0a6 4278 while (ad->model_num) {
539cc7c7 4279 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4280 if (ad->model_rev == NULL)
4281 return ad->horkage;
539cc7c7 4282 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4283 return ad->horkage;
f4b15fef 4284 }
6919a0a6 4285 ad++;
f4b15fef 4286 }
1da177e4
LT
4287 return 0;
4288}
4289
6919a0a6
AC
4290static int ata_dma_blacklisted(const struct ata_device *dev)
4291{
4292 /* We don't support polling DMA.
4293 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4294 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4295 */
9af5c9c9 4296 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4297 (dev->flags & ATA_DFLAG_CDB_INTR))
4298 return 1;
75683fe7 4299 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4300}
4301
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only one port may use DMA at a time */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto/adjust modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_drive_40wire(dev->id) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4382
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = dev->xfer_mode;	/* desired mode goes in sector count */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
/**
 *	ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *	@feature: The sector count represents the feature to set
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
					u8 feature)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;	/* SETFEATURES_SATA_ENABLE/_DISABLE subcommand */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = feature;	/* feature selector goes in sector count */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4456
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter)
 *	@sectors: Number of sectors (taskfile parameter)
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4499
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg (its length was trimmed by ata_sg_setup) */
		sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			/* kmap_atomic: pad page may be highmem */
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
4557
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		/* split each segment so no PRD entry crosses a 64K boundary */
		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
b9a4197e 4609
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says */
				/* split the 64K record into two 32K records */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4671
1da177e4
LT
4672/**
4673 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4674 * @qc: Metadata associated with taskfile to check
4675 *
780a87f7
JG
4676 * Allow low-level driver to filter ATA PACKET commands, returning
4677 * a status indicating whether or not it is OK to use DMA for the
4678 * supplied PACKET command.
4679 *
1da177e4 4680 * LOCKING:
cca3974e 4681 * spin_lock_irqsave(host lock)
0cba632b 4682 *
1da177e4
LT
4683 * RETURNS: 0 when ATAPI DMA can be used
4684 * nonzero otherwise
4685 */
4686int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4687{
4688 struct ata_port *ap = qc->ap;
b9a4197e
TH
4689
4690 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4691 * few ATAPI devices choke on such DMA requests.
4692 */
4693 if (unlikely(qc->nbytes & 15))
4694 return 1;
6f23a31d 4695
1da177e4 4696 if (ap->ops->check_atapi_dma)
b9a4197e 4697 return ap->ops->check_atapi_dma(qc);
1da177e4 4698
b9a4197e 4699 return 0;
1da177e4 4700}
b9a4197e 4701
31cc23b3
TH
4702/**
4703 * ata_std_qc_defer - Check whether a qc needs to be deferred
4704 * @qc: ATA command in question
4705 *
4706 * Non-NCQ commands cannot run with any other command, NCQ or
4707 * not. As upper layer only knows the queue depth, we are
4708 * responsible for maintaining exclusion. This function checks
4709 * whether a new command @qc can be issued.
4710 *
4711 * LOCKING:
4712 * spin_lock_irqsave(host lock)
4713 *
4714 * RETURNS:
4715 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4716 */
4717int ata_std_qc_defer(struct ata_queued_cmd *qc)
4718{
4719 struct ata_link *link = qc->dev->link;
4720
4721 if (qc->tf.protocol == ATA_PROT_NCQ) {
4722 if (!ata_tag_valid(link->active_tag))
4723 return 0;
4724 } else {
4725 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4726 return 0;
4727 }
4728
4729 return ATA_DEFER_LINK;
4730}
4731
1da177e4
LT
4732/**
4733 * ata_qc_prep - Prepare taskfile for submission
4734 * @qc: Metadata associated with taskfile to be prepared
4735 *
780a87f7
JG
4736 * Prepare ATA taskfile for submission.
4737 *
1da177e4 4738 * LOCKING:
cca3974e 4739 * spin_lock_irqsave(host lock)
1da177e4
LT
4740 */
4741void ata_qc_prep(struct ata_queued_cmd *qc)
4742{
4743 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4744 return;
4745
4746 ata_fill_sg(qc);
4747}
4748
d26fc955
AC
4749/**
4750 * ata_dumb_qc_prep - Prepare taskfile for submission
4751 * @qc: Metadata associated with taskfile to be prepared
4752 *
4753 * Prepare ATA taskfile for submission.
4754 *
4755 * LOCKING:
4756 * spin_lock_irqsave(host lock)
4757 */
4758void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4759{
4760 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4761 return;
4762
4763 ata_fill_sg_dumb(qc);
4764}
4765
/* no-op ->qc_prep for controllers that need no PRD/SG preparation */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4767
0cba632b
JG
4768/**
4769 * ata_sg_init_one - Associate command with memory buffer
4770 * @qc: Command to be associated
4771 * @buf: Memory buffer
4772 * @buflen: Length of memory buffer, in bytes.
4773 *
4774 * Initialize the data-related elements of queued_cmd @qc
4775 * to point to a single memory buffer, @buf of byte length @buflen.
4776 *
4777 * LOCKING:
cca3974e 4778 * spin_lock_irqsave(host lock)
0cba632b
JG
4779 */
4780
1da177e4
LT
4781void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4782{
1da177e4
LT
4783 qc->flags |= ATA_QCFLAG_SINGLE;
4784
cedc9a47 4785 qc->__sg = &qc->sgent;
1da177e4 4786 qc->n_elem = 1;
cedc9a47 4787 qc->orig_n_elem = 1;
1da177e4 4788 qc->buf_virt = buf;
233277ca 4789 qc->nbytes = buflen;
87260216 4790 qc->cursg = qc->__sg;
1da177e4 4791
61c0596c 4792 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4793}
4794
0cba632b
JG
4795/**
4796 * ata_sg_init - Associate command with scatter-gather table.
4797 * @qc: Command to be associated
4798 * @sg: Scatter-gather table.
4799 * @n_elem: Number of elements in s/g table.
4800 *
4801 * Initialize the data-related elements of queued_cmd @qc
4802 * to point to a scatter-gather table @sg, containing @n_elem
4803 * elements.
4804 *
4805 * LOCKING:
cca3974e 4806 * spin_lock_irqsave(host lock)
0cba632b
JG
4807 */
4808
1da177e4
LT
4809void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4810 unsigned int n_elem)
4811{
4812 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4813 qc->__sg = sg;
1da177e4 4814 qc->n_elem = n_elem;
cedc9a47 4815 qc->orig_n_elem = n_elem;
87260216 4816 qc->cursg = qc->__sg;
1da177e4
LT
4817}
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI commands are expected to need padding */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, the pad bytes come from the tail of the buffer */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* entire transfer fit in the pad buffer - nothing left to map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4887
4888/**
0cba632b
JG
4889 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4890 * @qc: Command with scatter-gather table to be mapped.
4891 *
4892 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4893 *
4894 * LOCKING:
cca3974e 4895 * spin_lock_irqsave(host lock)
1da177e4
LT
4896 *
4897 * RETURNS:
0cba632b 4898 * Zero on success, negative on error.
1da177e4
LT
4899 *
4900 */
4901
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));	/* must be an sg-mode qc */

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slot in the port's pre-allocated DMA pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI devices are expected to need padding here */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		sg_init_table(psg, 1);
		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
				qc->pad_len, offset_in_page(offset));

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* stash the tail bytes of the last sg entry into
			 * the pad buffer so the padded write still carries
			 * them; KM_IRQ0 because this may run in irq context
			 */
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* map one entry fewer if padding swallowed the last sg entirely */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg so the caller sees the original length */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
4973
0baab86b 4974/**
c893a3ae 4975 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4976 * @buf: Buffer to swap
4977 * @buf_words: Number of 16-bit words in buffer.
4978 *
4979 * Swap halves of 16-bit words if needed to convert from
4980 * little-endian byte order to native cpu byte order, or
4981 * vice-versa.
4982 *
4983 * LOCKING:
6f0ef4fa 4984 * Inherited from caller.
0baab86b 4985 */
1da177e4
LT
4986void swap_buf_le16(u16 *buf, unsigned int buf_words)
4987{
4988#ifdef __BIG_ENDIAN
4989 unsigned int i;
4990
4991 for (i = 0; i < buf_words; i++)
4992 buf[i] = le16_to_cpu(buf[i]);
4993#endif /* __BIG_ENDIAN */
4994}
4995
6ae4cfb5 4996/**
0d5ff566 4997 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4998 * @adev: device to target
6ae4cfb5
AL
4999 * @buf: data buffer
5000 * @buflen: buffer length
344babaa 5001 * @write_data: read/write
6ae4cfb5
AL
5002 *
5003 * Transfer data from/to the device data register by PIO.
5004 *
5005 * LOCKING:
5006 * Inherited from caller.
6ae4cfb5 5007 */
0d5ff566
TH
5008void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
5009 unsigned int buflen, int write_data)
1da177e4 5010{
9af5c9c9 5011 struct ata_port *ap = adev->link->ap;
6ae4cfb5 5012 unsigned int words = buflen >> 1;
1da177e4 5013
6ae4cfb5 5014 /* Transfer multiple of 2 bytes */
1da177e4 5015 if (write_data)
0d5ff566 5016 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 5017 else
0d5ff566 5018 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
5019
5020 /* Transfer trailing 1 byte, if any. */
5021 if (unlikely(buflen & 0x01)) {
5022 u16 align_buf[1] = { 0 };
5023 unsigned char *trailing_buf = buf + buflen - 1;
5024
5025 if (write_data) {
5026 memcpy(align_buf, trailing_buf, 1);
0d5ff566 5027 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 5028 } else {
0d5ff566 5029 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
5030 memcpy(trailing_buf, align_buf, 1);
5031 }
5032 }
1da177e4
LT
5033}
5034
75e99585 5035/**
0d5ff566 5036 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
5037 * @adev: device to target
5038 * @buf: data buffer
5039 * @buflen: buffer length
5040 * @write_data: read/write
5041 *
88574551 5042 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
5043 * transfer with interrupts disabled.
5044 *
5045 * LOCKING:
5046 * Inherited from caller.
5047 */
0d5ff566
TH
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;

	/* Same as ata_data_xfer(), but with local interrupts masked
	 * for the whole duration of the PIO transfer.
	 */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
5056
5057
6ae4cfb5 5058/**
5a5dbd18 5059 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
5060 * @qc: Command on going
5061 *
5a5dbd18 5062 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
5063 *
5064 * LOCKING:
5065 * Inherited from caller.
5066 */
5067
1da177e4
LT
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> move HSM to final state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		/* highmem pages need a temporary kernel mapping;
		 * KM_IRQ0 + irq-off because this can run in irq context
		 */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	/* advance transfer bookkeeping by one sector */
	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted -> step to the next one */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5113
07f6f7d0 5114/**
5a5dbd18 5115 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
5116 * @qc: Command on going
5117 *
5a5dbd18 5118 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
5119 * ATA device for the DRQ request.
5120 *
5121 * LOCKING:
5122 * Inherited from caller.
5123 */
1da177e4 5124
07f6f7d0
AL
5125static void ata_pio_sectors(struct ata_queued_cmd *qc)
5126{
5127 if (is_multi_taskfile(&qc->tf)) {
5128 /* READ/WRITE MULTIPLE */
5129 unsigned int nsect;
5130
587005de 5131 WARN_ON(qc->dev->multi_count == 0);
1da177e4 5132
5a5dbd18 5133 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 5134 qc->dev->multi_count);
07f6f7d0
AL
5135 while (nsect--)
5136 ata_pio_sector(qc);
5137 } else
5138 ata_pio_sector(qc);
4cc980b3
AL
5139
5140 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
5141}
5142
c71c1857
AL
5143/**
5144 * atapi_send_cdb - Write CDB bytes to hardware
5145 * @ap: Port to which ATAPI device is attached.
5146 * @qc: Taskfile currently active
5147 *
5148 * When device has indicated its readiness to accept
5149 * a CDB, this function is called. Send the CDB.
5150 *
5151 * LOCKING:
5152 * caller.
5153 */
5154
static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);	/* ATAPI CDBs are at least 12 bytes */

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	/* set the next HSM state according to the data phase that
	 * follows the CDB for this protocol
	 */
	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; wait for command completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5178
6ae4cfb5
AL
5179/**
5180 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
5181 * @qc: Command on going
5182 * @bytes: number of bytes
5183 *
 * Transfer data from/to the ATAPI device.
5185 *
5186 * LOCKING:
5187 * Inherited from caller.
5188 *
5189 */
5190
1da177e4
LT
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;
	int no_more_sg = 0;	/* set once the last sg entry is consumed */

	/* this chunk reaches the end of the command -> final HSM state */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(no_more_sg)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = qc->cursg;

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* highmem pages need a temporary kernel mapping;
		 * KM_IRQ0 + irq-off because this can run in irq context
		 */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* advance bookkeeping by the chunk just transferred */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		/* remember when we step off the final sg entry so the
		 * overrun path above can kick in on the next iteration
		 */
		if (qc->cursg == lsg)
			no_more_sg = 1;

		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
5278
6ae4cfb5
AL
5279/**
5280 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
5281 * @qc: Command on going
5282 *
 * Transfer data from/to the ATAPI device.
5284 *
5285 * LOCKING:
5286 * Inherited from caller.
6ae4cfb5
AL
5287 */
5288
1da177e4
LT
static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count the device wants to transfer in this DRQ block */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	/* bogus ireason; flag an HSM violation and let EH recover */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5329
5330/**
c234fb00
AL
5331 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5332 * @ap: the target ata_port
5333 * @qc: qc on going
1da177e4 5334 *
c234fb00
AL
5335 * RETURNS:
5336 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5337 */
c234fb00
AL
5338
5339static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5340{
c234fb00
AL
5341 if (qc->tf.flags & ATA_TFLAG_POLLING)
5342 return 1;
1da177e4 5343
c234fb00
AL
5344 if (ap->hsm_task_state == HSM_ST_FIRST) {
5345 if (qc->tf.protocol == ATA_PROT_PIO &&
5346 (qc->tf.flags & ATA_TFLAG_WRITE))
5347 return 1;
1da177e4 5348
c234fb00
AL
5349 if (is_atapi_taskfile(&qc->tf) &&
5350 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5351 return 1;
fe79e683
AL
5352 }
5353
c234fb00
AL
5354 return 0;
5355}
1da177e4 5356
c17ea20d
TH
5357/**
5358 * ata_hsm_qc_complete - finish a qc running on standard HSM
5359 * @qc: Command to complete
5360 * @in_wq: 1 if called from workqueue, 0 otherwise
5361 *
5362 * Finish @qc which is running on standard HSM.
5363 *
5364 * LOCKING:
cca3974e 5365 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
5366 * Otherwise, none on entry and grabs host lock.
5367 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH: freeze the port on HSM violation */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			/* called with host lock already held */
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: just complete, re-enabling the IRQ
		 * when called from the workqueue
		 */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5406
bb5cb290
AL
5407/**
5408 * ata_hsm_move - move the HSM to the next state.
5409 * @ap: the target ata_port
5410 * @qc: qc on going
5411 * @status: current device status
5412 * @in_wq: 1 if called from workqueue, 0 otherwise
5413 *
5414 * RETURNS:
5415 * 1 when poll next status needed, 0 otherwise.
5416 */
9a1004d0
TH
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5640
65f27f38 5641static void ata_pio_task(struct work_struct *work)
8061f5f0 5642{
65f27f38
DH
5643 struct ata_port *ap =
5644 container_of(work, struct ata_port, port_task.work);
5645 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5646 u8 status;
a1af3734 5647 int poll_next;
8061f5f0 5648
7fb6ec28 5649fsm_start:
a1af3734 5650 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5651
a1af3734
AL
5652 /*
5653 * This is purely heuristic. This is a fast path.
5654 * Sometimes when we enter, BSY will be cleared in
5655 * a chk-status or two. If not, the drive is probably seeking
5656 * or something. Snooze for a couple msecs, then
5657 * chk-status again. If still busy, queue delayed work.
5658 */
5659 status = ata_busy_wait(ap, ATA_BUSY, 5);
5660 if (status & ATA_BUSY) {
5661 msleep(2);
5662 status = ata_busy_wait(ap, ATA_BUSY, 10);
5663 if (status & ATA_BUSY) {
31ce6dae 5664 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5665 return;
5666 }
8061f5f0
TH
5667 }
5668
a1af3734
AL
5669 /* move the HSM */
5670 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5671
a1af3734
AL
5672 /* another command or interrupt handler
5673 * may be running at this point.
5674 */
5675 if (poll_next)
7fb6ec28 5676 goto fsm_start;
8061f5f0
TH
5677}
5678
1da177e4
LT
5679/**
5680 * ata_qc_new - Request an available ATA command, for queueing
5681 * @ap: Port associated with device @dev
5682 * @dev: Device from whom we request an available command structure
5683 *
5684 * LOCKING:
0cba632b 5685 * None.
1da177e4
LT
5686 */
5687
5688static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5689{
5690 struct ata_queued_cmd *qc = NULL;
5691 unsigned int i;
5692
e3180499 5693 /* no command while frozen */
b51e9e5d 5694 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5695 return NULL;
5696
2ab7db1f
TH
5697 /* the last tag is reserved for internal command. */
5698 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5699 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5700 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5701 break;
5702 }
5703
5704 if (qc)
5705 qc->tag = i;
5706
5707 return qc;
5708}
5709
5710/**
5711 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5712 * @dev: Device from whom we request an available command structure
5713 *
5714 * LOCKING:
0cba632b 5715 * None.
1da177e4
LT
5716 */
5717
3373efd8 5718struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5719{
9af5c9c9 5720 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5721 struct ata_queued_cmd *qc;
5722
5723 qc = ata_qc_new(ap);
5724 if (qc) {
1da177e4
LT
5725 qc->scsicmd = NULL;
5726 qc->ap = ap;
5727 qc->dev = dev;
1da177e4 5728
2c13b7ce 5729 ata_qc_reinit(qc);
1da177e4
LT
5730 }
5731
5732 return qc;
5733}
5734
1da177e4
LT
5735/**
5736 * ata_qc_free - free unused ata_queued_cmd
5737 * @qc: Command to complete
5738 *
5739 * Designed to free unused ata_queued_cmd object
5740 * in case something prevents using it.
5741 *
5742 * LOCKING:
cca3974e 5743 * spin_lock_irqsave(host lock)
1da177e4
LT
5744 */
5745void ata_qc_free(struct ata_queued_cmd *qc)
5746{
4ba946e9
TH
5747 struct ata_port *ap = qc->ap;
5748 unsigned int tag;
5749
a4631474 5750 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5751
4ba946e9
TH
5752 qc->flags = 0;
5753 tag = qc->tag;
5754 if (likely(ata_tag_valid(tag))) {
4ba946e9 5755 qc->tag = ATA_TAG_POISON;
6cec4a39 5756 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5757 }
1da177e4
LT
5758}
5759
/* Low-level qc completion: unmap DMA, clear active-command bookkeeping
 * on the link/port, then invoke the qc's completion callback.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		/* NCQ: drop this tag from sactive; the link counts as
		 * inactive once no tags remain
		 */
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5796
39599a53
TH
5797static void fill_result_tf(struct ata_queued_cmd *qc)
5798{
5799 struct ata_port *ap = qc->ap;
5800
39599a53 5801 qc->result_tf.flags = qc->tf.flags;
4742d54f 5802 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5803}
5804
f686bcb8
TH
5805/**
5806 * ata_qc_complete - Complete an active ATA command
5807 * @qc: Command to complete
5808 * @err_mask: ATA Status register contents
5809 *
5810 * Indicate to the mid and upper layers that an ATA
5811 * command has completed, with either an ok or not-ok status.
5812 *
5813 * LOCKING:
cca3974e 5814 * spin_lock_irqsave(host lock)
f686bcb8
TH
5815 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are completed even on failure;
			 * everything else is handed to EH
			 */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5888
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This functions is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits that changed between the old and new active masks are
	 * the commands that finished since the last call
	 */
	done_mask = ap->qc_active ^ qc_active;

	/* a changed bit that is still set in @qc_active would be a
	 * 0->1 transition, i.e. a command appearing out of nowhere --
	 * reject the whole update
	 */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			/* give the LLDD a chance to post-process first */
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
5937
1da177e4
LT
5938static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5939{
5940 struct ata_port *ap = qc->ap;
5941
5942 switch (qc->tf.protocol) {
3dc1d881 5943 case ATA_PROT_NCQ:
1da177e4
LT
5944 case ATA_PROT_DMA:
5945 case ATA_PROT_ATAPI_DMA:
5946 return 1;
5947
5948 case ATA_PROT_ATAPI:
5949 case ATA_PROT_PIO:
1da177e4
LT
5950 if (ap->flags & ATA_FLAG_PIO_DMA)
5951 return 1;
5952
5953 /* fall through */
5954
5955 default:
5956 return 0;
5957 }
5958
5959 /* never reached */
5960}
5961
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	On failure (S/G setup failure or ->qc_issue() error) the qc is
 *	completed with AC_ERR_SYSTEM / the issue error mask set.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));

		/* first NCQ command on the link makes it active */
		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		/* non-NCQ may not overlap with queued commands */
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	/* if device is sleeping, schedule softreset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
6034
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		/* polled completion runs in the PIO task */
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
6166
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA command failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
6268
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* only dispatch for a live, non-polled command */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
6313
34bf2170
TH
6314/**
6315 * sata_scr_valid - test whether SCRs are accessible
936fd732 6316 * @link: ATA link to test SCR accessibility for
34bf2170 6317 *
936fd732 6318 * Test whether SCRs are accessible for @link.
34bf2170
TH
6319 *
6320 * LOCKING:
6321 * None.
6322 *
6323 * RETURNS:
6324 * 1 if SCRs are accessible, 0 otherwise.
6325 */
936fd732 6326int sata_scr_valid(struct ata_link *link)
34bf2170 6327{
936fd732
TH
6328 struct ata_port *ap = link->ap;
6329
a16abc0b 6330 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6331}
6332
6333/**
6334 * sata_scr_read - read SCR register of the specified port
936fd732 6335 * @link: ATA link to read SCR for
34bf2170
TH
6336 * @reg: SCR to read
6337 * @val: Place to store read value
6338 *
936fd732 6339 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6340 * guaranteed to succeed if @link is ap->link, the cable type of
6341 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6342 *
6343 * LOCKING:
633273a3 6344 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6345 *
6346 * RETURNS:
6347 * 0 on success, negative errno on failure.
6348 */
936fd732 6349int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6350{
633273a3
TH
6351 if (ata_is_host_link(link)) {
6352 struct ata_port *ap = link->ap;
936fd732 6353
633273a3
TH
6354 if (sata_scr_valid(link))
6355 return ap->ops->scr_read(ap, reg, val);
6356 return -EOPNOTSUPP;
6357 }
6358
6359 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6360}
6361
6362/**
6363 * sata_scr_write - write SCR register of the specified port
936fd732 6364 * @link: ATA link to write SCR for
34bf2170
TH
6365 * @reg: SCR to write
6366 * @val: value to write
6367 *
936fd732 6368 * Write @val to SCR register @reg of @link. This function is
633273a3
TH
6369 * guaranteed to succeed if @link is ap->link, the cable type of
6370 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6371 *
6372 * LOCKING:
633273a3 6373 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6374 *
6375 * RETURNS:
6376 * 0 on success, negative errno on failure.
6377 */
936fd732 6378int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6379{
633273a3
TH
6380 if (ata_is_host_link(link)) {
6381 struct ata_port *ap = link->ap;
6382
6383 if (sata_scr_valid(link))
6384 return ap->ops->scr_write(ap, reg, val);
6385 return -EOPNOTSUPP;
6386 }
936fd732 6387
633273a3 6388 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6389}
6390
6391/**
6392 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6393 * @link: ATA link to write SCR for
34bf2170
TH
6394 * @reg: SCR to write
6395 * @val: value to write
6396 *
6397 * This function is identical to sata_scr_write() except that this
6398 * function performs flush after writing to the register.
6399 *
6400 * LOCKING:
633273a3 6401 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6402 *
6403 * RETURNS:
6404 * 0 on success, negative errno on failure.
6405 */
936fd732 6406int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6407{
633273a3
TH
6408 if (ata_is_host_link(link)) {
6409 struct ata_port *ap = link->ap;
6410 int rc;
da3dbb17 6411
633273a3
TH
6412 if (sata_scr_valid(link)) {
6413 rc = ap->ops->scr_write(ap, reg, val);
6414 if (rc == 0)
6415 rc = ap->ops->scr_read(ap, reg, &val);
6416 return rc;
6417 }
6418 return -EOPNOTSUPP;
34bf2170 6419 }
633273a3
TH
6420
6421 return sata_pmp_scr_write(link, reg, val);
34bf2170
TH
6422}
6423
6424/**
936fd732
TH
6425 * ata_link_online - test whether the given link is online
6426 * @link: ATA link to test
34bf2170 6427 *
936fd732
TH
6428 * Test whether @link is online. Note that this function returns
6429 * 0 if online status of @link cannot be obtained, so
6430 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6431 *
6432 * LOCKING:
6433 * None.
6434 *
6435 * RETURNS:
6436 * 1 if the port online status is available and online.
6437 */
936fd732 6438int ata_link_online(struct ata_link *link)
34bf2170
TH
6439{
6440 u32 sstatus;
6441
936fd732
TH
6442 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6443 (sstatus & 0xf) == 0x3)
34bf2170
TH
6444 return 1;
6445 return 0;
6446}
6447
6448/**
936fd732
TH
6449 * ata_link_offline - test whether the given link is offline
6450 * @link: ATA link to test
34bf2170 6451 *
936fd732
TH
6452 * Test whether @link is offline. Note that this function
6453 * returns 0 if offline status of @link cannot be obtained, so
6454 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6455 *
6456 * LOCKING:
6457 * None.
6458 *
6459 * RETURNS:
6460 * 1 if the port offline status is available and offline.
6461 */
936fd732 6462int ata_link_offline(struct ata_link *link)
34bf2170
TH
6463{
6464 u32 sstatus;
6465
936fd732
TH
6466 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6467 (sstatus & 0xf) != 0x3)
34bf2170
TH
6468 return 1;
6469 return 0;
6470}
0baab86b 6471
/**
 *	ata_flush_cache - flush the device's write cache
 *	@dev: device whose cache is to be flushed
 *
 *	Issue FLUSH CACHE (EXT if the device supports 48-bit) and wait
 *	for completion.  A no-op when the device reports that flushing
 *	is unnecessary (see ata_try_flush_cache()).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep) -- assumed from
 *	ata_do_simple_cmd() usage; TODO confirm against callers.
 *
 *	RETURNS:
 *	0 on success, -EIO if the flush command failed.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	/* 48-bit capable devices take the EXT opcode */
	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
6497
6ffa01d8 6498#ifdef CONFIG_PM
/**
 *	ata_host_request_pm - request EH to perform a PM operation on all ports
 *	@host: host to operate on
 *	@mesg: PM message to pass to each port
 *	@action: EH action to schedule on every link
 *	@ehi_flags: EH info flags to set on every link
 *	@wait: if non-zero, wait for each port's EH to finish and
 *	       propagate the first non-zero result
 *
 *	Records the PM request in each port, schedules EH, and
 *	optionally waits for it.  The actual suspend/resume work is
 *	performed by the EH thread.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep); takes each port's lock.
 *
 *	RETURNS:
 *	0 on success, first port's -errno on failure when @wait is set.
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH writes its result through ap->pm_result */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6548
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/*
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
	ata_lpm_enable(host);

	/* wait=1: block until every port's EH has processed the request */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
6579
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait=0: resumes proceed in parallel, no result collected */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;

	/* reenable link pm */
	ata_lpm_disable(host);
}
6ffa01d8 6600#endif
500530f6 6601
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -errno (e.g. -ENOMEM) on failure.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* devres-managed: freed automatically on device teardown */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
6632
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* zero everything past the persistent prefix of the struct */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6667
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}
6700
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol, spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	/* SPD field of SControl; 0 means no speed limit configured */
	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
6731
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* port starts out disabled until probing enables it */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* host link is always pmp 0 */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6791
/**
 *	ata_host_release - devres release callback for an ATA host
 *	@gendev: generic device the host is attached to
 *	@res: devres resource (unused; host is fetched via drvdata)
 *
 *	Stops all ports and the host (only if the host was started),
 *	then frees every port and clears driver data.  Teardown is
 *	deliberately two-pass: all port_stop/host_stop callbacks run
 *	before any port memory is freed.
 *
 *	LOCKING:
 *	Inherited from devres core.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6826
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 slot keeps the ports array NULL-terminated
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	/* releasing the group also frees everything allocated above */
	devres_release_group(dev, NULL);
	return NULL;
}
6891
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	NOTE(review): @ppi[0] must be non-NULL; otherwise @pi below is
 *	dereferenced while still NULL -- presumably guaranteed by all
 *	callers, verify before relying on it.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* after the NULL terminator, keep reusing the last entry */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy port ops become the host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6941
ecef7253
TH
6942/**
6943 * ata_host_start - start and freeze ports of an ATA host
6944 * @host: ATA host to start ports for
6945 *
6946 * Start and then freeze ports of @host. Started status is
6947 * recorded in host->flags, so this function can be called
6948 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6949 * once. If host->ops isn't initialized yet, its set to the
6950 * first non-dummy port ops.
ecef7253
TH
6951 *
6952 * LOCKING:
6953 * Inherited from calling layer (may sleep).
6954 *
6955 * RETURNS:
6956 * 0 if all ports are started successfully, -errno otherwise.
6957 */
6958int ata_host_start(struct ata_host *host)
6959{
6960 int i, rc;
6961
6962 if (host->flags & ATA_HOST_STARTED)
6963 return 0;
6964
6965 for (i = 0; i < host->n_ports; i++) {
6966 struct ata_port *ap = host->ports[i];
6967
f3187195
TH
6968 if (!host->ops && !ata_port_is_dummy(ap))
6969 host->ops = ap->ops;
6970
ecef7253
TH
6971 if (ap->ops->port_start) {
6972 rc = ap->ops->port_start(ap);
6973 if (rc) {
6974 ata_port_printk(ap, KERN_ERR, "failed to "
6975 "start port (errno=%d)\n", rc);
6976 goto err_out;
6977 }
6978 }
6979
6980 ata_eh_freeze_port(ap);
6981 }
6982
6983 host->flags |= ATA_HOST_STARTED;
6984 return 0;
6985
6986 err_out:
6987 while (--i >= 0) {
6988 struct ata_port *ap = host->ports[i];
6989
6990 if (ap->ops->port_stop)
6991 ap->ops->port_stop(ap);
6992 }
6993 return rc;
6994}
6995
b03732f0 6996/**
cca3974e
JG
6997 * ata_sas_host_init - Initialize a host struct
6998 * @host: host to initialize
6999 * @dev: device host is attached to
7000 * @flags: host flags
7001 * @ops: port_ops
b03732f0
BK
7002 *
7003 * LOCKING:
7004 * PCI/etc. bus probe sem.
7005 *
7006 */
f3187195 7007/* KILLME - the only user left is ipr */
cca3974e
JG
7008void ata_host_init(struct ata_host *host, struct device *dev,
7009 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 7010{
cca3974e
JG
7011 spin_lock_init(&host->lock);
7012 host->dev = dev;
7013 host->flags = flags;
7014 host->ops = ops;
b03732f0
BK
7015}
7016
f3187195
TH
7017/**
7018 * ata_host_register - register initialized ATA host
7019 * @host: ATA host to register
7020 * @sht: template for SCSI host
7021 *
7022 * Register initialized ATA host. @host is allocated using
7023 * ata_host_alloc() and fully initialized by LLD. This function
7024 * starts ports, registers @host with ATA and SCSI layers and
7025 * probe registered devices.
7026 *
7027 * LOCKING:
7028 * Inherited from calling layer (may sleep).
7029 *
7030 * RETURNS:
7031 * 0 on success, -errno otherwise.
7032 */
7033int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7034{
7035 int i, rc;
7036
7037 /* host must have been started */
7038 if (!(host->flags & ATA_HOST_STARTED)) {
7039 dev_printk(KERN_ERR, host->dev,
7040 "BUG: trying to register unstarted host\n");
7041 WARN_ON(1);
7042 return -EINVAL;
7043 }
7044
7045 /* Blow away unused ports. This happens when LLD can't
7046 * determine the exact number of ports to allocate at
7047 * allocation time.
7048 */
7049 for (i = host->n_ports; host->ports[i]; i++)
7050 kfree(host->ports[i]);
7051
7052 /* give ports names and add SCSI hosts */
7053 for (i = 0; i < host->n_ports; i++)
7054 host->ports[i]->print_id = ata_print_id++;
7055
7056 rc = ata_scsi_add_hosts(host, sht);
7057 if (rc)
7058 return rc;
7059
fafbae87
TH
7060 /* associate with ACPI nodes */
7061 ata_acpi_associate(host);
7062
f3187195
TH
7063 /* set cable, sata_spd_limit and report */
7064 for (i = 0; i < host->n_ports; i++) {
7065 struct ata_port *ap = host->ports[i];
f3187195
TH
7066 unsigned long xfer_mask;
7067
7068 /* set SATA cable type if still unset */
7069 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7070 ap->cbl = ATA_CBL_SATA;
7071
7072 /* init sata_spd_limit to the current value */
4fb37a25 7073 sata_link_init_spd(&ap->link);
f3187195 7074
cbcdd875 7075 /* print per-port info to dmesg */
f3187195
TH
7076 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7077 ap->udma_mask);
7078
abf6e8ed 7079 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
7080 ata_port_printk(ap, KERN_INFO,
7081 "%cATA max %s %s\n",
a16abc0b 7082 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 7083 ata_mode_string(xfer_mask),
cbcdd875 7084 ap->link.eh_info.desc);
abf6e8ed
TH
7085 ata_ehi_clear_desc(&ap->link.eh_info);
7086 } else
f3187195
TH
7087 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7088 }
7089
7090 /* perform each probe synchronously */
7091 DPRINTK("probe begin\n");
7092 for (i = 0; i < host->n_ports; i++) {
7093 struct ata_port *ap = host->ports[i];
7094 int rc;
7095
7096 /* probe */
7097 if (ap->ops->error_handler) {
9af5c9c9 7098 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
7099 unsigned long flags;
7100
7101 ata_port_probe(ap);
7102
7103 /* kick EH for boot probing */
7104 spin_lock_irqsave(ap->lock, flags);
7105
f58229f8
TH
7106 ehi->probe_mask =
7107 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
7108 ehi->action |= ATA_EH_SOFTRESET;
7109 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7110
f4d6d004 7111 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
7112 ap->pflags |= ATA_PFLAG_LOADING;
7113 ata_port_schedule_eh(ap);
7114
7115 spin_unlock_irqrestore(ap->lock, flags);
7116
7117 /* wait for EH to finish */
7118 ata_port_wait_eh(ap);
7119 } else {
7120 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7121 rc = ata_bus_probe(ap);
7122 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7123
7124 if (rc) {
7125 /* FIXME: do something useful here?
7126 * Current libata behavior will
7127 * tear down everything when
7128 * the module is removed
7129 * or the h/w is unplugged.
7130 */
7131 }
7132 }
7133 }
7134
7135 /* probes are done, now scan each port's disk(s) */
7136 DPRINTK("host probe begin\n");
7137 for (i = 0; i < host->n_ports; i++) {
7138 struct ata_port *ap = host->ports[i];
7139
1ae46317 7140 ata_scsi_scan_host(ap, 1);
ca77329f 7141 ata_lpm_schedule(ap, ap->pm_policy);
f3187195
TH
7142 }
7143
7144 return 0;
7145}
7146
f5cda257
TH
7147/**
7148 * ata_host_activate - start host, request IRQ and register it
7149 * @host: target ATA host
7150 * @irq: IRQ to request
7151 * @irq_handler: irq_handler used when requesting IRQ
7152 * @irq_flags: irq_flags used when requesting IRQ
7153 * @sht: scsi_host_template to use when registering the host
7154 *
7155 * After allocating an ATA host and initializing it, most libata
7156 * LLDs perform three steps to activate the host - start host,
7157 * request IRQ and register it. This helper takes necessasry
7158 * arguments and performs the three steps in one go.
7159 *
7160 * LOCKING:
7161 * Inherited from calling layer (may sleep).
7162 *
7163 * RETURNS:
7164 * 0 on success, -errno otherwise.
7165 */
7166int ata_host_activate(struct ata_host *host, int irq,
7167 irq_handler_t irq_handler, unsigned long irq_flags,
7168 struct scsi_host_template *sht)
7169{
cbcdd875 7170 int i, rc;
f5cda257
TH
7171
7172 rc = ata_host_start(host);
7173 if (rc)
7174 return rc;
7175
7176 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7177 dev_driver_string(host->dev), host);
7178 if (rc)
7179 return rc;
7180
cbcdd875
TH
7181 for (i = 0; i < host->n_ports; i++)
7182 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7183
f5cda257
TH
7184 rc = ata_host_register(host, sht);
7185 /* if failed, just free the IRQ and leave ports alone */
7186 if (rc)
7187 devm_free_irq(host->dev, irq, host);
7188
7189 return rc;
7190}
7191
720ba126
TH
7192/**
7193 * ata_port_detach - Detach ATA port in prepration of device removal
7194 * @ap: ATA port to be detached
7195 *
7196 * Detach all ATA devices and the associated SCSI devices of @ap;
7197 * then, remove the associated SCSI host. @ap is guaranteed to
7198 * be quiescent on return from this function.
7199 *
7200 * LOCKING:
7201 * Kernel thread context (may sleep).
7202 */
741b7763 7203static void ata_port_detach(struct ata_port *ap)
720ba126
TH
7204{
7205 unsigned long flags;
41bda9c9 7206 struct ata_link *link;
f58229f8 7207 struct ata_device *dev;
720ba126
TH
7208
7209 if (!ap->ops->error_handler)
c3cf30a9 7210 goto skip_eh;
720ba126
TH
7211
7212 /* tell EH we're leaving & flush EH */
ba6a1308 7213 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 7214 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 7215 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7216
7217 ata_port_wait_eh(ap);
7218
7219 /* EH is now guaranteed to see UNLOADING, so no new device
7220 * will be attached. Disable all existing devices.
7221 */
ba6a1308 7222 spin_lock_irqsave(ap->lock, flags);
720ba126 7223
41bda9c9
TH
7224 ata_port_for_each_link(link, ap) {
7225 ata_link_for_each_dev(dev, link)
7226 ata_dev_disable(dev);
7227 }
720ba126 7228
ba6a1308 7229 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7230
7231 /* Final freeze & EH. All in-flight commands are aborted. EH
7232 * will be skipped and retrials will be terminated with bad
7233 * target.
7234 */
ba6a1308 7235 spin_lock_irqsave(ap->lock, flags);
720ba126 7236 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 7237 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
7238
7239 ata_port_wait_eh(ap);
45a66c1c 7240 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 7241
c3cf30a9 7242 skip_eh:
720ba126 7243 /* remove the associated SCSI host */
cca3974e 7244 scsi_remove_host(ap->scsi_host);
720ba126
TH
7245}
7246
0529c159
TH
7247/**
7248 * ata_host_detach - Detach all ports of an ATA host
7249 * @host: Host to detach
7250 *
7251 * Detach all ports of @host.
7252 *
7253 * LOCKING:
7254 * Kernel thread context (may sleep).
7255 */
7256void ata_host_detach(struct ata_host *host)
7257{
7258 int i;
7259
7260 for (i = 0; i < host->n_ports; i++)
7261 ata_port_detach(host->ports[i]);
7262}
7263
1da177e4
LT
7264/**
7265 * ata_std_ports - initialize ioaddr with standard port offsets.
7266 * @ioaddr: IO address structure to be initialized
0baab86b
EF
7267 *
7268 * Utility function which initializes data_addr, error_addr,
7269 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
7270 * device_addr, status_addr, and command_addr to standard offsets
7271 * relative to cmd_addr.
7272 *
7273 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 7274 */
0baab86b 7275
1da177e4
LT
7276void ata_std_ports(struct ata_ioports *ioaddr)
7277{
7278 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
7279 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
7280 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
7281 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
7282 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
7283 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
7284 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
7285 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
7286 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
7287 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
7288}
7289
0baab86b 7290
374b1873
JG
7291#ifdef CONFIG_PCI
7292
1da177e4
LT
7293/**
7294 * ata_pci_remove_one - PCI layer callback for device removal
7295 * @pdev: PCI device that was removed
7296 *
b878ca5d
TH
7297 * PCI layer indicates to libata via this hook that hot-unplug or
7298 * module unload event has occurred. Detach all ports. Resource
7299 * release is handled via devres.
1da177e4
LT
7300 *
7301 * LOCKING:
7302 * Inherited from PCI layer (may sleep).
7303 */
f0d36efd 7304void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7305{
2855568b 7306 struct device *dev = &pdev->dev;
cca3974e 7307 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7308
b878ca5d 7309 ata_host_detach(host);
1da177e4
LT
7310}
7311
7312/* move to PCI subsystem */
057ace5e 7313int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7314{
7315 unsigned long tmp = 0;
7316
7317 switch (bits->width) {
7318 case 1: {
7319 u8 tmp8 = 0;
7320 pci_read_config_byte(pdev, bits->reg, &tmp8);
7321 tmp = tmp8;
7322 break;
7323 }
7324 case 2: {
7325 u16 tmp16 = 0;
7326 pci_read_config_word(pdev, bits->reg, &tmp16);
7327 tmp = tmp16;
7328 break;
7329 }
7330 case 4: {
7331 u32 tmp32 = 0;
7332 pci_read_config_dword(pdev, bits->reg, &tmp32);
7333 tmp = tmp32;
7334 break;
7335 }
7336
7337 default:
7338 return -EINVAL;
7339 }
7340
7341 tmp &= bits->mask;
7342
7343 return (tmp == bits->val) ? 1 : 0;
7344}
9b847548 7345
6ffa01d8 7346#ifdef CONFIG_PM
3c5100c1 7347void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
7348{
7349 pci_save_state(pdev);
4c90d971 7350 pci_disable_device(pdev);
500530f6 7351
4c90d971 7352 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 7353 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
7354}
7355
553c4aa6 7356int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7357{
553c4aa6
TH
7358 int rc;
7359
9b847548
JA
7360 pci_set_power_state(pdev, PCI_D0);
7361 pci_restore_state(pdev);
553c4aa6 7362
b878ca5d 7363 rc = pcim_enable_device(pdev);
553c4aa6
TH
7364 if (rc) {
7365 dev_printk(KERN_ERR, &pdev->dev,
7366 "failed to enable device after resume (%d)\n", rc);
7367 return rc;
7368 }
7369
9b847548 7370 pci_set_master(pdev);
553c4aa6 7371 return 0;
500530f6
TH
7372}
7373
3c5100c1 7374int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7375{
cca3974e 7376 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7377 int rc = 0;
7378
cca3974e 7379 rc = ata_host_suspend(host, mesg);
500530f6
TH
7380 if (rc)
7381 return rc;
7382
3c5100c1 7383 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7384
7385 return 0;
7386}
7387
7388int ata_pci_device_resume(struct pci_dev *pdev)
7389{
cca3974e 7390 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7391 int rc;
500530f6 7392
553c4aa6
TH
7393 rc = ata_pci_device_do_resume(pdev);
7394 if (rc == 0)
7395 ata_host_resume(host);
7396 return rc;
9b847548 7397}
6ffa01d8
TH
7398#endif /* CONFIG_PM */
7399
1da177e4
LT
7400#endif /* CONFIG_PCI */
7401
7402
1da177e4
LT
7403static int __init ata_init(void)
7404{
a8601e5f 7405 ata_probe_timeout *= HZ;
1da177e4
LT
7406 ata_wq = create_workqueue("ata");
7407 if (!ata_wq)
7408 return -ENOMEM;
7409
453b07ac
TH
7410 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7411 if (!ata_aux_wq) {
7412 destroy_workqueue(ata_wq);
7413 return -ENOMEM;
7414 }
7415
1da177e4
LT
7416 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7417 return 0;
7418}
7419
7420static void __exit ata_exit(void)
7421{
7422 destroy_workqueue(ata_wq);
453b07ac 7423 destroy_workqueue(ata_aux_wq);
1da177e4
LT
7424}
7425
a4625085 7426subsys_initcall(ata_init);
1da177e4
LT
7427module_exit(ata_exit);
7428
67846b30 7429static unsigned long ratelimit_time;
34af946a 7430static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7431
7432int ata_ratelimit(void)
7433{
7434 int rc;
7435 unsigned long flags;
7436
7437 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7438
7439 if (time_after(jiffies, ratelimit_time)) {
7440 rc = 1;
7441 ratelimit_time = jiffies + (HZ/5);
7442 } else
7443 rc = 0;
7444
7445 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7446
7447 return rc;
7448}
7449
c22daff4
TH
7450/**
7451 * ata_wait_register - wait until register value changes
7452 * @reg: IO-mapped register
7453 * @mask: Mask to apply to read register value
7454 * @val: Wait condition
7455 * @interval_msec: polling interval in milliseconds
7456 * @timeout_msec: timeout in milliseconds
7457 *
7458 * Waiting for some bits of register to change is a common
7459 * operation for ATA controllers. This function reads 32bit LE
7460 * IO-mapped register @reg and tests for the following condition.
7461 *
7462 * (*@reg & mask) != val
7463 *
7464 * If the condition is met, it returns; otherwise, the process is
7465 * repeated after @interval_msec until timeout.
7466 *
7467 * LOCKING:
7468 * Kernel thread context (may sleep)
7469 *
7470 * RETURNS:
7471 * The final register value.
7472 */
7473u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7474 unsigned long interval_msec,
7475 unsigned long timeout_msec)
7476{
7477 unsigned long timeout;
7478 u32 tmp;
7479
7480 tmp = ioread32(reg);
7481
7482 /* Calculate timeout _after_ the first read to make sure
7483 * preceding writes reach the controller before starting to
7484 * eat away the timeout.
7485 */
7486 timeout = jiffies + (timeout_msec * HZ) / 1000;
7487
7488 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7489 msleep(interval_msec);
7490 tmp = ioread32(reg);
7491 }
7492
7493 return tmp;
7494}
7495
dd5b06c4
TH
7496/*
7497 * Dummy port_ops
7498 */
7499static void ata_dummy_noret(struct ata_port *ap) { }
7500static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7501static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7502
7503static u8 ata_dummy_check_status(struct ata_port *ap)
7504{
7505 return ATA_DRDY;
7506}
7507
7508static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7509{
7510 return AC_ERR_SYSTEM;
7511}
7512
7513const struct ata_port_operations ata_dummy_port_ops = {
dd5b06c4
TH
7514 .check_status = ata_dummy_check_status,
7515 .check_altstatus = ata_dummy_check_status,
7516 .dev_select = ata_noop_dev_select,
7517 .qc_prep = ata_noop_qc_prep,
7518 .qc_issue = ata_dummy_qc_issue,
7519 .freeze = ata_dummy_noret,
7520 .thaw = ata_dummy_noret,
7521 .error_handler = ata_dummy_noret,
7522 .post_internal_cmd = ata_dummy_qc_noret,
7523 .irq_clear = ata_dummy_noret,
7524 .port_start = ata_dummy_ret0,
7525 .port_stop = ata_dummy_noret,
7526};
7527
21b0ad4f
TH
7528const struct ata_port_info ata_dummy_port_info = {
7529 .port_ops = &ata_dummy_port_ops,
7530};
7531
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_after_reset);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
EXPORT_SYMBOL_GPL(sata_pmp_do_eh);

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);