]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
[libata] ata_piix: Use more-robust form of array initialization
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
8bc3fc47 62#define DRV_VERSION "2.21" /* must be exactly four chars */
fda0efc5
JG
63
64
d7bb4cc7 65/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
66const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
67const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
68const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 69
3373efd8
TH
70static unsigned int ata_dev_init_params(struct ata_device *dev,
71 u16 heads, u16 sectors);
72static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 73static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 74static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 75static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 76
f3187195 77unsigned int ata_print_id = 1;
1da177e4
LT
78static struct workqueue_struct *ata_wq;
79
453b07ac
TH
80struct workqueue_struct *ata_aux_wq;
81
418dc1f5 82int atapi_enabled = 1;
1623c81e
JG
83module_param(atapi_enabled, int, 0444);
84MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
85
95de719a
AL
86int atapi_dmadir = 0;
87module_param(atapi_dmadir, int, 0444);
88MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
89
baf4fdfa
ML
90int atapi_passthru16 = 1;
91module_param(atapi_passthru16, int, 0444);
92MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
93
c3c013a2
JG
94int libata_fua = 0;
95module_param_named(fua, libata_fua, int, 0444);
96MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
97
1e999736
AC
98static int ata_ignore_hpa = 0;
99module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
100MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
101
a8601e5f
AM
102static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
103module_param(ata_probe_timeout, int, 0444);
104MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
105
d7d0dad6
JG
106int libata_noacpi = 1;
107module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
108MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
109
1da177e4
LT
110MODULE_AUTHOR("Jeff Garzik");
111MODULE_DESCRIPTION("Library module for ATA devices");
112MODULE_LICENSE("GPL");
113MODULE_VERSION(DRV_VERSION);
114
0baab86b 115
1da177e4
LT
116/**
117 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
118 * @tf: Taskfile to convert
1da177e4 119 * @pmp: Port multiplier port
9977126c
TH
120 * @is_cmd: This FIS is for command
121 * @fis: Buffer into which data will output
1da177e4
LT
122 *
123 * Converts a standard ATA taskfile to a Serial ATA
124 * FIS structure (Register - Host to Device).
125 *
126 * LOCKING:
127 * Inherited from caller.
128 */
9977126c 129void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 130{
9977126c
TH
131 fis[0] = 0x27; /* Register - Host to Device FIS */
132 fis[1] = pmp & 0xf; /* Port multiplier number*/
133 if (is_cmd)
134 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
135
1da177e4
LT
136 fis[2] = tf->command;
137 fis[3] = tf->feature;
138
139 fis[4] = tf->lbal;
140 fis[5] = tf->lbam;
141 fis[6] = tf->lbah;
142 fis[7] = tf->device;
143
144 fis[8] = tf->hob_lbal;
145 fis[9] = tf->hob_lbam;
146 fis[10] = tf->hob_lbah;
147 fis[11] = tf->hob_feature;
148
149 fis[12] = tf->nsect;
150 fis[13] = tf->hob_nsect;
151 fis[14] = 0;
152 fis[15] = tf->ctl;
153
154 fis[16] = 0;
155 fis[17] = 0;
156 fis[18] = 0;
157 fis[19] = 0;
158}
159
160/**
161 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
162 * @fis: Buffer from which data will be input
163 * @tf: Taskfile to output
164 *
e12a1be6 165 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
166 *
167 * LOCKING:
168 * Inherited from caller.
169 */
170
057ace5e 171void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
172{
173 tf->command = fis[2]; /* status */
174 tf->feature = fis[3]; /* error */
175
176 tf->lbal = fis[4];
177 tf->lbam = fis[5];
178 tf->lbah = fis[6];
179 tf->device = fis[7];
180
181 tf->hob_lbal = fis[8];
182 tf->hob_lbam = fis[9];
183 tf->hob_lbah = fis[10];
184
185 tf->nsect = fis[12];
186 tf->hob_nsect = fis[13];
187}
188
8cbd6df1
AL
/*
 * Read/write command opcode table used by ata_rwcmd_protocol().
 *
 * Indexed by (index + fua + lba48 + write) where, per the caller:
 *   index = 0 (PIO multi-sector), 8 (PIO single-sector) or 16 (DMA)
 *   fua   = 4 when ATA_TFLAG_FUA is set
 *   lba48 = 2 when ATA_TFLAG_LBA48 is set
 *   write = 1 for writes, 0 for reads
 * A zero entry marks an unsupported combination (e.g. FUA read, or
 * FUA without LBA48) and makes ata_rwcmd_protocol() fail.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
218
219/**
8cbd6df1 220 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
221 * @tf: command to examine and configure
222 * @dev: device tf belongs to
1da177e4 223 *
2e9edbf8 224 * Examine the device configuration and tf->flags to calculate
8cbd6df1 225 * the proper read/write commands and protocol to use.
1da177e4
LT
226 *
227 * LOCKING:
228 * caller.
229 */
bd056d7e 230static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 231{
9a3dccc4 232 u8 cmd;
1da177e4 233
9a3dccc4 234 int index, fua, lba48, write;
2e9edbf8 235
9a3dccc4 236 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
237 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
238 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 239
8cbd6df1
AL
240 if (dev->flags & ATA_DFLAG_PIO) {
241 tf->protocol = ATA_PROT_PIO;
9a3dccc4 242 index = dev->multi_count ? 0 : 8;
9af5c9c9 243 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
244 /* Unable to use DMA due to host limitation */
245 tf->protocol = ATA_PROT_PIO;
0565c26d 246 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
247 } else {
248 tf->protocol = ATA_PROT_DMA;
9a3dccc4 249 index = 16;
8cbd6df1 250 }
1da177e4 251
9a3dccc4
TH
252 cmd = ata_rw_cmds[index + fua + lba48 + write];
253 if (cmd) {
254 tf->command = cmd;
255 return 0;
256 }
257 return -1;
1da177e4
LT
258}
259
35b649fe
TH
260/**
261 * ata_tf_read_block - Read block address from ATA taskfile
262 * @tf: ATA taskfile of interest
263 * @dev: ATA device @tf belongs to
264 *
265 * LOCKING:
266 * None.
267 *
268 * Read block address from @tf. This function can handle all
269 * three address formats - LBA, LBA48 and CHS. tf->protocol and
270 * flags select the address format to use.
271 *
272 * RETURNS:
273 * Block address read from @tf.
274 */
275u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
276{
277 u64 block = 0;
278
279 if (tf->flags & ATA_TFLAG_LBA) {
280 if (tf->flags & ATA_TFLAG_LBA48) {
281 block |= (u64)tf->hob_lbah << 40;
282 block |= (u64)tf->hob_lbam << 32;
283 block |= tf->hob_lbal << 24;
284 } else
285 block |= (tf->device & 0xf) << 24;
286
287 block |= tf->lbah << 16;
288 block |= tf->lbam << 8;
289 block |= tf->lbal;
290 } else {
291 u32 cyl, head, sect;
292
293 cyl = tf->lbam | (tf->lbah << 8);
294 head = tf->device & 0xf;
295 sect = tf->lbal;
296
297 block = (cyl * dev->heads + head) * dev->sectors + sect;
298 }
299
300 return block;
301}
302
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Picks NCQ,
 *	LBA28, LBA48 or CHS addressing in that order of preference.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* for NCQ the tag goes in nsect bits 7:3 and the
		 * sector count goes in the feature registers */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;	/* LBA bit */
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl   = track / dev->heads;
		head  = track % dev->heads;
		sect  = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
425
cb95d562
TH
426/**
427 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
428 * @pio_mask: pio_mask
429 * @mwdma_mask: mwdma_mask
430 * @udma_mask: udma_mask
431 *
432 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
433 * unsigned int xfer_mask.
434 *
435 * LOCKING:
436 * None.
437 *
438 * RETURNS:
439 * Packed xfer_mask.
440 */
441static unsigned int ata_pack_xfermask(unsigned int pio_mask,
442 unsigned int mwdma_mask,
443 unsigned int udma_mask)
444{
445 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
446 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
447 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
448}
449
c0489e4e
TH
450/**
451 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
452 * @xfer_mask: xfer_mask to unpack
453 * @pio_mask: resulting pio_mask
454 * @mwdma_mask: resulting mwdma_mask
455 * @udma_mask: resulting udma_mask
456 *
457 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
458 * Any NULL distination masks will be ignored.
459 */
460static void ata_unpack_xfermask(unsigned int xfer_mask,
461 unsigned int *pio_mask,
462 unsigned int *mwdma_mask,
463 unsigned int *udma_mask)
464{
465 if (pio_mask)
466 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
467 if (mwdma_mask)
468 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
469 if (udma_mask)
470 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
471}
472
/*
 * Maps each transfer-mode class's bit range inside a packed xfer_mask
 * (shift = first bit, bits = range width) to the base XFER_* mode value
 * of that class.  Terminated by a sentinel entry with shift == -1.
 * Used by the ata_xfer_mask2mode/mode2mask/mode2shift helpers below.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
	{ -1, },
};
482
483/**
484 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
485 * @xfer_mask: xfer_mask of interest
486 *
487 * Return matching XFER_* value for @xfer_mask. Only the highest
488 * bit of @xfer_mask is considered.
489 *
490 * LOCKING:
491 * None.
492 *
493 * RETURNS:
494 * Matching XFER_* value, 0 if no match found.
495 */
496static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
497{
498 int highbit = fls(xfer_mask) - 1;
499 const struct ata_xfer_ent *ent;
500
501 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
502 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
503 return ent->base + highbit - ent->shift;
504 return 0;
505}
506
507/**
508 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
509 * @xfer_mode: XFER_* of interest
510 *
511 * Return matching xfer_mask for @xfer_mode.
512 *
513 * LOCKING:
514 * None.
515 *
516 * RETURNS:
517 * Matching xfer_mask, 0 if no match found.
518 */
519static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
520{
521 const struct ata_xfer_ent *ent;
522
523 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
524 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
525 return 1 << (ent->shift + xfer_mode - ent->base);
526 return 0;
527}
528
529/**
530 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
531 * @xfer_mode: XFER_* of interest
532 *
533 * Return matching xfer_shift for @xfer_mode.
534 *
535 * LOCKING:
536 * None.
537 *
538 * RETURNS:
539 * Matching xfer_shift, -1 if no match found.
540 */
541static int ata_xfer_mode2shift(unsigned int xfer_mode)
542{
543 const struct ata_xfer_ent *ent;
544
545 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
546 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
547 return ent->shift;
548 return -1;
549}
550
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* index order mirrors the packed xfer_mask bit layout */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";

	return xfer_mode_str[highbit];
}
596
4c360c81
TH
/*
 * sata_spd_string - name a SATA link speed
 * @spd: speed number as read from SStatus/SControl (1-based)
 *
 * Returns a constant string for speed 1 ("1.5 Gbps") or 2 ("3.0 Gbps"),
 * or "<unknown>" for zero / out-of-range values.
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};
	unsigned int nr_spds = sizeof(spd_str) / sizeof(spd_str[0]);

	if (spd == 0 || (spd - 1) >= nr_spds)
		return "<unknown>";

	return spd_str[spd - 1];
}
608
/*
 * ata_dev_disable - take a device out of service
 * @dev: ATA device to disable
 *
 * If @dev is currently enabled, optionally log a warning (when driver
 * messages are enabled on the owning port), force the transfer mode
 * down to PIO0 quietly, and bump dev->class.
 * NOTE(review): the class++ increment appears to shift the class to
 * its "unused" counterpart — confirm against the ATA_DEV_* ordering.
 */
void ata_dev_disable(struct ata_device *dev)
{
	if (ata_dev_enabled(dev)) {
		if (ata_msg_drv(dev->link->ap))
			ata_dev_printk(dev, KERN_WARNING, "disabled\n");
		ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
					ATA_DNXFER_QUIET);
		dev->class++;
	}
}
619
1da177e4 620/**
0d5ff566 621 * ata_devchk - PATA device presence detection
1da177e4
LT
622 * @ap: ATA channel to examine
623 * @device: Device to examine (starting at zero)
624 *
625 * This technique was originally described in
626 * Hale Landis's ATADRVR (www.ata-atapi.com), and
627 * later found its way into the ATA/ATAPI spec.
628 *
629 * Write a pattern to the ATA shadow registers,
630 * and if a device is present, it will respond by
631 * correctly storing and echoing back the
632 * ATA shadow register contents.
633 *
634 * LOCKING:
635 * caller.
636 */
637
0d5ff566 638static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
639{
640 struct ata_ioports *ioaddr = &ap->ioaddr;
641 u8 nsect, lbal;
642
643 ap->ops->dev_select(ap, device);
644
0d5ff566
TH
645 iowrite8(0x55, ioaddr->nsect_addr);
646 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 647
0d5ff566
TH
648 iowrite8(0xaa, ioaddr->nsect_addr);
649 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 650
0d5ff566
TH
651 iowrite8(0x55, ioaddr->nsect_addr);
652 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 653
0d5ff566
TH
654 nsect = ioread8(ioaddr->nsect_addr);
655 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
656
657 if ((nsect == 0x55) && (lbal == 0xaa))
658 return 1; /* we found a device */
659
660 return 0; /* nothing found */
661}
662
1da177e4
LT
663/**
664 * ata_dev_classify - determine device type based on ATA-spec signature
665 * @tf: ATA taskfile register set for device to be identified
666 *
667 * Determine from taskfile register contents whether a device is
668 * ATA or ATAPI, as per "Signature and persistence" section
669 * of ATA/PI spec (volume 1, sect 5.14).
670 *
671 * LOCKING:
672 * None.
673 *
674 * RETURNS:
675 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
676 * the event of failure.
677 */
678
057ace5e 679unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
680{
681 /* Apple's open source Darwin code hints that some devices only
682 * put a proper signature into the LBA mid/high registers,
683 * So, we only check those. It's sufficient for uniqueness.
684 */
685
686 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
687 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
688 DPRINTK("found ATA device by sig\n");
689 return ATA_DEV_ATA;
690 }
691
692 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
693 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
694 DPRINTK("found ATAPI device by sig\n");
695 return ATA_DEV_ATAPI;
696 }
697
698 DPRINTK("unknown device\n");
699 return ATA_DEV_UNKNOWN;
700}
701
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	ap->ops->tf_read(ap, &tf);
	/* after reset, the feature field holds the diagnostic result */
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && device == 0)
		/* diagnostic fail : do nothing _YET_ */
		ap->link.device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN)
		return ATA_DEV_NONE;
	/* status 0 with an ATA signature means nothing is actually there */
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return ATA_DEV_NONE;
	return class;
}
760
761/**
6a62a04d 762 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
763 * @id: IDENTIFY DEVICE results we will examine
764 * @s: string into which data is output
765 * @ofs: offset into identify device page
766 * @len: length of string to return. must be an even number.
767 *
768 * The strings in the IDENTIFY DEVICE page are broken up into
769 * 16-bit chunks. Run through the string, and output each
770 * 8-bit chunk linearly, regardless of platform.
771 *
772 * LOCKING:
773 * caller.
774 */
775
6a62a04d
TH
776void ata_id_string(const u16 *id, unsigned char *s,
777 unsigned int ofs, unsigned int len)
1da177e4
LT
778{
779 unsigned int c;
780
781 while (len > 0) {
782 c = id[ofs] >> 8;
783 *s = c;
784 s++;
785
786 c = id[ofs] & 0xff;
787 *s = c;
788 s++;
789
790 ofs++;
791 len -= 2;
792 }
793}
794
0e949ff3 795/**
6a62a04d 796 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
797 * @id: IDENTIFY DEVICE results we will examine
798 * @s: string into which data is output
799 * @ofs: offset into identify device page
800 * @len: length of string to return. must be an odd number.
801 *
6a62a04d 802 * This function is identical to ata_id_string except that it
0e949ff3
TH
803 * trims trailing spaces and terminates the resulting string with
804 * null. @len must be actual maximum length (even number) + 1.
805 *
806 * LOCKING:
807 * caller.
808 */
6a62a04d
TH
809void ata_id_c_string(const u16 *id, unsigned char *s,
810 unsigned int ofs, unsigned int len)
0e949ff3
TH
811{
812 unsigned char *p;
813
814 WARN_ON(!(len & 1));
815
6a62a04d 816 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
817
818 p = s + strnlen(s, len - 1);
819 while (p > s && p[-1] == ' ')
820 p--;
821 *p = '\0';
822}
0baab86b 823
1e999736
AC
824static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
825{
826 u64 sectors = 0;
827
828 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
829 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
830 sectors |= (tf->hob_lbal & 0xff) << 24;
831 sectors |= (tf->lbah & 0xff) << 16;
832 sectors |= (tf->lbam & 0xff) << 8;
833 sectors |= (tf->lbal & 0xff);
834
835 return ++sectors;
836}
837
838static u64 ata_tf_to_lba(struct ata_taskfile *tf)
839{
840 u64 sectors = 0;
841
842 sectors |= (tf->device & 0x0f) << 24;
843 sectors |= (tf->lbah & 0xff) << 16;
844 sectors |= (tf->lbam & 0xff) << 8;
845 sectors |= (tf->lbal & 0xff);
846
847 return ++sectors;
848}
849
850/**
851 * ata_read_native_max_address_ext - LBA48 native max query
852 * @dev: Device to query
853 *
854 * Perform an LBA48 size query upon the device in question. Return the
855 * actual LBA48 size or zero if the command fails.
856 */
857
858static u64 ata_read_native_max_address_ext(struct ata_device *dev)
859{
860 unsigned int err;
861 struct ata_taskfile tf;
862
863 ata_tf_init(dev, &tf);
864
865 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
866 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
867 tf.protocol |= ATA_PROT_NODATA;
868 tf.device |= 0x40;
869
870 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
871 if (err)
872 return 0;
873
874 return ata_tf_to_lba48(&tf);
875}
876
877/**
878 * ata_read_native_max_address - LBA28 native max query
879 * @dev: Device to query
880 *
881 * Performa an LBA28 size query upon the device in question. Return the
882 * actual LBA28 size or zero if the command fails.
883 */
884
885static u64 ata_read_native_max_address(struct ata_device *dev)
886{
887 unsigned int err;
888 struct ata_taskfile tf;
889
890 ata_tf_init(dev, &tf);
891
892 tf.command = ATA_CMD_READ_NATIVE_MAX;
893 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
894 tf.protocol |= ATA_PROT_NODATA;
895 tf.device |= 0x40;
896
897 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
898 if (err)
899 return 0;
900
901 return ata_tf_to_lba(&tf);
902}
903
904/**
905 * ata_set_native_max_address_ext - LBA48 native max set
906 * @dev: Device to query
6b38d1d1 907 * @new_sectors: new max sectors value to set for the device
1e999736
AC
908 *
909 * Perform an LBA48 size set max upon the device in question. Return the
910 * actual LBA48 size or zero if the command fails.
911 */
912
913static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
914{
915 unsigned int err;
916 struct ata_taskfile tf;
917
918 new_sectors--;
919
920 ata_tf_init(dev, &tf);
921
922 tf.command = ATA_CMD_SET_MAX_EXT;
923 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
924 tf.protocol |= ATA_PROT_NODATA;
925 tf.device |= 0x40;
926
927 tf.lbal = (new_sectors >> 0) & 0xff;
928 tf.lbam = (new_sectors >> 8) & 0xff;
929 tf.lbah = (new_sectors >> 16) & 0xff;
930
931 tf.hob_lbal = (new_sectors >> 24) & 0xff;
932 tf.hob_lbam = (new_sectors >> 32) & 0xff;
933 tf.hob_lbah = (new_sectors >> 40) & 0xff;
934
935 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
936 if (err)
937 return 0;
938
939 return ata_tf_to_lba48(&tf);
940}
941
942/**
943 * ata_set_native_max_address - LBA28 native max set
944 * @dev: Device to query
6b38d1d1 945 * @new_sectors: new max sectors value to set for the device
1e999736
AC
946 *
947 * Perform an LBA28 size set max upon the device in question. Return the
948 * actual LBA28 size or zero if the command fails.
949 */
950
951static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
952{
953 unsigned int err;
954 struct ata_taskfile tf;
955
956 new_sectors--;
957
958 ata_tf_init(dev, &tf);
959
960 tf.command = ATA_CMD_SET_MAX;
961 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
962 tf.protocol |= ATA_PROT_NODATA;
963
964 tf.lbal = (new_sectors >> 0) & 0xff;
965 tf.lbam = (new_sectors >> 8) & 0xff;
966 tf.lbah = (new_sectors >> 16) & 0xff;
967 tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;
968
969 err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
970 if (err)
971 return 0;
972
973 return ata_tf_to_lba(&tf);
974}
975
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media.  The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	The sector count to use: the full native size when the HPA was
 *	successfully lifted (ata_ignore_hpa set), otherwise the current
 *	(possibly BIOS-clipped) size.
 */

static u64 ata_hpa_resize(struct ata_device *dev)
{
	u64 sectors = dev->n_sectors;
	u64 hpa_sectors;

	/* query the native max address using the command family
	 * matching the device's addressing mode */
	if (ata_id_has_lba48(dev->id))
		hpa_sectors = ata_read_native_max_address_ext(dev);
	else
		hpa_sectors = ata_read_native_max_address(dev);

	if (hpa_sectors > sectors) {
		ata_dev_printk(dev, KERN_INFO,
			"Host Protected Area detected:\n"
			"\tcurrent size: %lld sectors\n"
			"\tnative size: %lld sectors\n",
			(long long)sectors, (long long)hpa_sectors);

		if (ata_ignore_hpa) {
			/* user asked us to lift the HPA limit */
			if (ata_id_has_lba48(dev->id))
				hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
			else
				hpa_sectors = ata_set_native_max_address(dev,
								hpa_sectors);

			/* zero means the SET MAX command failed;
			 * fall through and keep the current size */
			if (hpa_sectors) {
				ata_dev_printk(dev, KERN_INFO, "native size "
					"increased to %lld sectors\n",
					(long long)hpa_sectors);
				return hpa_sectors;
			}
		}
	} else if (hpa_sectors < sectors)
		/* native max below current size is unexpected; warn only */
		ata_dev_printk(dev, KERN_WARNING, "%s 1: hpa sectors (%lld) "
			       "is smaller than sectors (%lld)\n", __FUNCTION__,
			       (long long)hpa_sectors, (long long)sectors);

	return sectors;
}
1023
2940740b
TH
1024static u64 ata_id_n_sectors(const u16 *id)
1025{
1026 if (ata_id_has_lba(id)) {
1027 if (ata_id_has_lba48(id))
1028 return ata_id_u64(id, 100);
1029 else
1030 return ata_id_u32(id, 60);
1031 } else {
1032 if (ata_id_current_chs_valid(id))
1033 return ata_id_u32(id, 57);
1034 else
1035 return id[1] * id[3] * id[6];
1036 }
1037}
1038
10305f0f
AC
1039/**
1040 * ata_id_to_dma_mode - Identify DMA mode from id block
1041 * @dev: device to identify
cc261267 1042 * @unknown: mode to assume if we cannot tell
10305f0f
AC
1043 *
1044 * Set up the timing values for the device based upon the identify
1045 * reported values for the DMA mode. This function is used by drivers
1046 * which rely upon firmware configured modes, but wish to report the
1047 * mode correctly when possible.
1048 *
1049 * In addition we emit similarly formatted messages to the default
1050 * ata_dev_set_mode handler, in order to provide consistency of
1051 * presentation.
1052 */
1053
1054void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1055{
1056 unsigned int mask;
1057 u8 mode;
1058
1059 /* Pack the DMA modes */
1060 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1061 if (dev->id[53] & 0x04)
1062 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1063
1064 /* Select the mode in use */
1065 mode = ata_xfer_mask2mode(mask);
1066
1067 if (mode != 0) {
1068 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1069 ata_mode_string(mask));
1070 } else {
1071 /* SWDMA perhaps ? */
1072 mode = unknown;
1073 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1074 }
1075
1076 /* Configure the device reporting */
1077 dev->xfer_mode = mode;
1078 dev->xfer_shift = ata_xfer_mode2shift(mode);
1079}
1080
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* Intentionally empty: controllers that have no device-select
	 * semantics install this as their ->dev_select() hook. */
}
1096
0baab86b 1097
1da177e4
LT
1098/**
1099 * ata_std_dev_select - Select device 0/1 on ATA bus
1100 * @ap: ATA channel to manipulate
1101 * @device: ATA device (numbered from zero) to select
1102 *
1103 * Use the method defined in the ATA specification to
1104 * make either device 0, or device 1, active on the
0baab86b
EF
1105 * ATA channel. Works with both PIO and MMIO.
1106 *
1107 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1108 *
1109 * LOCKING:
1110 * caller.
1111 */
1112
1113void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1114{
1115 u8 tmp;
1116
1117 if (device == 0)
1118 tmp = ATA_DEVICE_OBS;
1119 else
1120 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1121
0d5ff566 1122 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1123 ata_pause(ap); /* needed; also flushes, for mmio */
1124}
1125
1126/**
1127 * ata_dev_select - Select device 0/1 on ATA bus
1128 * @ap: ATA channel to manipulate
1129 * @device: ATA device (numbered from zero) to select
1130 * @wait: non-zero to wait for Status register BSY bit to clear
1131 * @can_sleep: non-zero if context allows sleeping
1132 *
1133 * Use the method defined in the ATA specification to
1134 * make either device 0, or device 1, active on the
1135 * ATA channel.
1136 *
1137 * This is a high-level version of ata_std_dev_select(),
1138 * which additionally provides the services of inserting
1139 * the proper pauses and status polling, where needed.
1140 *
1141 * LOCKING:
1142 * caller.
1143 */
1144
1145void ata_dev_select(struct ata_port *ap, unsigned int device,
1146 unsigned int wait, unsigned int can_sleep)
1147{
88574551 1148 if (ata_msg_probe(ap))
44877b4e
TH
1149 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1150 "device %u, wait %u\n", device, wait);
1da177e4
LT
1151
1152 if (wait)
1153 ata_wait_idle(ap);
1154
1155 ap->ops->dev_select(ap, device);
1156
1157 if (wait) {
9af5c9c9 1158 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1159 msleep(150);
1160 ata_wait_idle(ap);
1161 }
1162}
1163
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page: capability/validity words (49, 53), transfer-mode words
 *	(63, 64, 75, 88), and version/command-set words (80-84, 93).
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	/* word 49: capabilities, 53: field validity, 63: MWDMA,
	 * 64: advanced PIO, 75: queue depth */
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	/* words 80/81: ATA major/minor version, 82-84: command sets */
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	/* word 88: UDMA modes, 93: hardware reset result */
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1202
cb95d562
TH
1203/**
1204 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1205 * @id: IDENTIFY data to compute xfer mask from
1206 *
1207 * Compute the xfermask for this device. This is not as trivial
1208 * as it seems if we must consider early devices correctly.
1209 *
1210 * FIXME: pre IDE drive timing (do we care ?).
1211 *
1212 * LOCKING:
1213 * None.
1214 *
1215 * RETURNS:
1216 * Computed xfermask
1217 */
1218static unsigned int ata_id_xfermask(const u16 *id)
1219{
1220 unsigned int pio_mask, mwdma_mask, udma_mask;
1221
1222 /* Usual case. Word 53 indicates word 64 is valid */
1223 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1224 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1225 pio_mask <<= 3;
1226 pio_mask |= 0x7;
1227 } else {
1228 /* If word 64 isn't valid then Word 51 high byte holds
1229 * the PIO timing number for the maximum. Turn it into
1230 * a mask.
1231 */
7a0f1c8a 1232 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1233 if (mode < 5) /* Valid PIO range */
1234 pio_mask = (2 << mode) - 1;
1235 else
1236 pio_mask = 1;
cb95d562
TH
1237
1238 /* But wait.. there's more. Design your standards by
1239 * committee and you too can get a free iordy field to
1240 * process. However its the speeds not the modes that
1241 * are supported... Note drivers using the timing API
1242 * will get this right anyway
1243 */
1244 }
1245
1246 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1247
b352e57d
AC
1248 if (ata_id_is_cfa(id)) {
1249 /*
1250 * Process compact flash extended modes
1251 */
1252 int pio = id[163] & 0x7;
1253 int dma = (id[163] >> 3) & 7;
1254
1255 if (pio)
1256 pio_mask |= (1 << 5);
1257 if (pio > 1)
1258 pio_mask |= (1 << 6);
1259 if (dma)
1260 mwdma_mask |= (1 << 3);
1261 if (dma > 1)
1262 mwdma_mask |= (1 << 4);
1263 }
1264
fb21f0d0
TH
1265 udma_mask = 0;
1266 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1267 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1268
1269 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1270}
1271
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	/* rebind the per-port delayed work to @fn before queueing */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1300
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guaranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancels a pending run and waits out one already executing */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1320
7102d230 1321static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1322{
77853bf2 1323 struct completion *waiting = qc->private_data;
a2a7a662 1324
a2a7a662 1325 complete(waiting);
a2a7a662
TH
1326}
1327
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data transfer direction of the command
 *	@sg: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;	/* saved for the timeout message */
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the chosen tag must be free; anything else is a logic error */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* Save and clear the link/port active-command state so the
	 * internal command runs alone; restored on the way out. */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total byte count across the sg list */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		/* timed out waiting for completion */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the command state saved above */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1500
2432697b 1501/**
33480a0e 1502 * ata_exec_internal - execute libata internal command
2432697b
TH
1503 * @dev: Device to which the command is sent
1504 * @tf: Taskfile registers for the command and the result
1505 * @cdb: CDB for packet command
1506 * @dma_dir: Data tranfer direction of the command
1507 * @buf: Data buffer of the command
1508 * @buflen: Length of data buffer
1509 *
1510 * Wrapper around ata_exec_internal_sg() which takes simple
1511 * buffer instead of sg list.
1512 *
1513 * LOCKING:
1514 * None. Should be called with kernel context, might sleep.
1515 *
1516 * RETURNS:
1517 * Zero on success, AC_ERR_* mask on failure
1518 */
1519unsigned ata_exec_internal(struct ata_device *dev,
1520 struct ata_taskfile *tf, const u8 *cdb,
1521 int dma_dir, void *buf, unsigned int buflen)
1522{
33480a0e
TH
1523 struct scatterlist *psg = NULL, sg;
1524 unsigned int n_elem = 0;
2432697b 1525
33480a0e
TH
1526 if (dma_dir != DMA_NONE) {
1527 WARN_ON(!buf);
1528 sg_init_one(&sg, buf, buflen);
1529 psg = &sg;
1530 n_elem++;
1531 }
2432697b 1532
33480a0e 1533 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1534}
1535
977e6b9f
TH
1536/**
1537 * ata_do_simple_cmd - execute simple internal command
1538 * @dev: Device to which the command is sent
1539 * @cmd: Opcode to execute
1540 *
1541 * Execute a 'simple' command, that only consists of the opcode
1542 * 'cmd' itself, without filling any other registers
1543 *
1544 * LOCKING:
1545 * Kernel thread context (may sleep).
1546 *
1547 * RETURNS:
1548 * Zero on success, AC_ERR_* mask on failure
e58eb583 1549 */
77b08fb5 1550unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1551{
1552 struct ata_taskfile tf;
e58eb583
TH
1553
1554 ata_tf_init(dev, &tf);
1555
1556 tf.command = cmd;
1557 tf.flags |= ATA_TFLAG_DEVICE;
1558 tf.protocol = ATA_PROT_NODATA;
1559
977e6b9f 1560 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1561}
1562
1bc4ccff
AC
1563/**
1564 * ata_pio_need_iordy - check if iordy needed
1565 * @adev: ATA device
1566 *
1567 * Check if the current speed of the device requires IORDY. Used
1568 * by various controllers for chip configuration.
1569 */
a617c09f 1570
1bc4ccff
AC
1571unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1572{
432729f0
AC
1573 /* Controller doesn't support IORDY. Probably a pointless check
1574 as the caller should know this */
9af5c9c9 1575 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1576 return 0;
432729f0
AC
1577 /* PIO3 and higher it is mandatory */
1578 if (adev->pio_mode > XFER_PIO_2)
1579 return 1;
1580 /* We turn it on when possible */
1581 if (ata_id_has_iordy(adev->id))
1bc4ccff 1582 return 1;
432729f0
AC
1583 return 0;
1584}
2e9edbf8 1585
/**
 *	ata_pio_mask_no_iordy - Return the non IORDY mask
 *	@adev: ATA device
 *
 *	Compute the highest mode possible if we are not using iordy.
 *
 *	NOTE(review): an earlier version of this comment said "return
 *	-1 if no iordy mode is available", but the code below always
 *	returns at least a PIO0-2 mask (3 << ATA_SHIFT_PIO) and can
 *	never return -1.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
	/* If we have no drive specific rule, then PIO 2 is non IORDY */
	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE */
		u16 pio = adev->id[ATA_ID_EIDE_PIO];
		/* Is the speed faster than the drive allows non IORDY ? */
		if (pio) {
			/* This is cycle times not frequency - watch the logic! */
			if (pio > 240)	/* PIO2 is 240nS per cycle */
				return 3 << ATA_SHIFT_PIO;
			/* fast enough without IORDY up to PIO2 */
			return 7 << ATA_SHIFT_PIO;
		}
	}
	return 3 << ATA_SHIFT_PIO;
}
1609
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;		/* human-readable failure cause */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY variant matching the (assumed) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the page must agree with the assumed class */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		/* 0x738c means the initial IDENTIFY data was complete,
		 * so a spin-up failure is only fatal for 0x37c8 */
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * should never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1786
3373efd8 1787static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1788{
9af5c9c9
TH
1789 struct ata_port *ap = dev->link->ap;
1790 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1791}
1792
a6e6ce8e
TH
1793static void ata_dev_config_ncq(struct ata_device *dev,
1794 char *desc, size_t desc_sz)
1795{
9af5c9c9 1796 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1797 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1798
1799 if (!ata_id_has_ncq(dev->id)) {
1800 desc[0] = '\0';
1801 return;
1802 }
75683fe7 1803 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1804 snprintf(desc, desc_sz, "NCQ (not used)");
1805 return;
1806 }
a6e6ce8e 1807 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1808 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1809 dev->flags |= ATA_DFLAG_NCQ;
1810 }
1811
1812 if (hdepth >= ddepth)
1813 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1814 else
1815 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1816}
1817
/**
 *	ata_dev_configure - Configure the specified ATA/ATAPI device
 *	@dev: Target device to configure
 *
 *	Configure @dev according to @dev->id.  Generic and low-level
 *	driver specific fixups are also applied.  Fills in capacity,
 *	geometry, transfer limits, CDB length and feature flags, and
 *	prints the device summary to dmesg on initial probe.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	const u16 *id = dev->id;
	unsigned int xfer_mask;
	char revbuf[7];		/* XYZ-99\0 */
	char fwrevbuf[ATA_ID_FW_REV_LEN+1];
	char modelbuf[ATA_ID_PROD_LEN+1];
	int rc;

	if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
		ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
			       __FUNCTION__);
		return 0;
	}

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	/* set horkage */
	dev->horkage |= ata_dev_blacklisted(dev);

	/* let ACPI work its magic */
	rc = ata_acpi_on_devcfg(dev);
	if (rc)
		return rc;

	/* print device capabilities */
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
			       "85:%04x 86:%04x 87:%04x 88:%04x\n",
			       __FUNCTION__,
			       id[49], id[82], id[83], id[84],
			       id[85], id[86], id[87], id[88]);

	/* initialize to-be-configured parameters */
	dev->flags &= ~ATA_DFLAG_CFG_MASK;
	dev->max_sectors = 0;
	dev->cdb_len = 0;
	dev->n_sectors = 0;
	dev->cylinders = 0;
	dev->heads = 0;
	dev->sectors = 0;

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* find max transfer mode; for printk only */
	xfer_mask = ata_id_xfermask(id);

	if (ata_msg_probe(ap))
		ata_dump_id(id);

	/* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
	ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
			sizeof(fwrevbuf));

	ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
			sizeof(modelbuf));

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (ata_id_is_cfa(id)) {
			if (id[162] & 1) /* CPRM may make this media unusable */
				ata_dev_printk(dev, KERN_WARNING,
					       "supports DRM functions and may "
					       "not be fully accessable.\n");
			snprintf(revbuf, 7, "CFA");
		}
		else
			snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

		dev->n_sectors = ata_id_n_sectors(id);

		/* word 59 bit 8 flags a valid multi-sector setting */
		if (dev->id[59] & 0x100)
			dev->multi_count = dev->id[59] & 0xff;

		if (ata_id_has_lba(id)) {
			const char *lba_desc;
			char ncq_desc[20];

			lba_desc = "LBA";
			dev->flags |= ATA_DFLAG_LBA;
			if (ata_id_has_lba48(id)) {
				dev->flags |= ATA_DFLAG_LBA48;
				lba_desc = "LBA48";

				if (dev->n_sectors >= (1UL << 28) &&
				    ata_id_has_flush_ext(id))
					dev->flags |= ATA_DFLAG_FLUSH_EXT;
			}

			/* undo host protected area unless quirked off */
			if (!(dev->horkage & ATA_HORKAGE_BROKEN_HPA) &&
			    ata_id_hpa_enabled(dev->id))
				dev->n_sectors = ata_hpa_resize(dev);

			/* config NCQ */
			ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u: %s %s\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, lba_desc, ncq_desc);
			}
		} else {
			/* CHS */

			/* Default translation */
			dev->cylinders	= id[1];
			dev->heads	= id[3];
			dev->sectors	= id[6];

			if (ata_id_current_chs_valid(id)) {
				/* Current CHS translation is valid. */
				dev->cylinders = id[54];
				dev->heads     = id[55];
				dev->sectors   = id[56];
			}

			/* print device info to dmesg */
			if (ata_msg_drv(ap) && print_info) {
				ata_dev_printk(dev, KERN_INFO,
					"%s: %s, %s, max %s\n",
					revbuf, modelbuf, fwrevbuf,
					ata_mode_string(xfer_mask));
				ata_dev_printk(dev, KERN_INFO,
					"%Lu sectors, multi %u, CHS %u/%u/%u\n",
					(unsigned long long)dev->n_sectors,
					dev->multi_count, dev->cylinders,
					dev->heads, dev->sectors);
			}
		}

		dev->cdb_len = 16;
	}

	/* ATAPI-specific feature tests */
	else if (dev->class == ATA_DEV_ATAPI) {
		char *cdb_intr_string = "";

		rc = atapi_cdb_len(id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "unsupported CDB len\n");
			rc = -EINVAL;
			goto err_out_nosup;
		}
		dev->cdb_len = (unsigned int) rc;

		/*
		 * check to see if this ATAPI device supports
		 * Asynchronous Notification
		 */
		if ((ap->flags & ATA_FLAG_AN) && ata_id_has_AN(id)) {
			int err;
			/* issue SET feature command to turn this on */
			err = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
			if (err)
				ata_dev_printk(dev, KERN_ERR,
						"unable to set AN, err %x\n",
						err);
			else
				dev->flags |= ATA_DFLAG_AN;
		}

		if (ata_id_cdb_intr(dev->id)) {
			dev->flags |= ATA_DFLAG_CDB_INTR;
			cdb_intr_string = ", CDB intr";
		}

		/* print device info to dmesg */
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "ATAPI: %s, %s, max %s%s\n",
				       modelbuf, fwrevbuf,
				       ata_mode_string(xfer_mask),
				       cdb_intr_string);
	}

	/* determine max_sectors */
	dev->max_sectors = ATA_MAX_SECTORS;
	if (dev->flags & ATA_DFLAG_LBA48)
		dev->max_sectors = ATA_MAX_SECTORS_LBA48;

	if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
		/* Let the user know. We don't want to disallow opens for
		   rescue purposes, or in case the vendor is just a blithering
		   idiot */
		if (print_info) {
			ata_dev_printk(dev, KERN_WARNING,
"Drive reports diagnostics failure. This may indicate a drive\n");
			ata_dev_printk(dev, KERN_WARNING,
"fault or invalid emulation. Contact drive vendor for information.\n");
		}
	}

	/* limit bridge transfers to udma5, 200 sectors */
	if (ata_dev_knobble(dev)) {
		if (ata_msg_drv(ap) && print_info)
			ata_dev_printk(dev, KERN_INFO,
				       "applying bridge limits\n");
		dev->udma_mask &= ATA_UDMA5;
		dev->max_sectors = ATA_MAX_SECTORS;
	}

	if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
		dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
					 dev->max_sectors);

	/* give the LLDD a chance to apply controller-specific fixups */
	if (ap->ops->dev_config)
		ap->ops->dev_config(dev);

	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
			__FUNCTION__, ata_chk_status(ap));
	return 0;

err_out_nosup:
	if (ata_msg_probe(ap))
		ata_dev_printk(dev, KERN_DEBUG,
			       "%s: EXIT, err\n", __FUNCTION__);
	return rc;
}
2065
be0d18df 2066/**
2e41e8e6 2067 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2068 * @ap: port
2069 *
2e41e8e6 2070 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2071 * detection.
2072 */
2073
2074int ata_cable_40wire(struct ata_port *ap)
2075{
2076 return ATA_CBL_PATA40;
2077}
2078
2079/**
2e41e8e6 2080 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2081 * @ap: port
2082 *
2e41e8e6 2083 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2084 * detection.
2085 */
2086
2087int ata_cable_80wire(struct ata_port *ap)
2088{
2089 return ATA_CBL_PATA80;
2090}
2091
2092/**
2093 * ata_cable_unknown - return unknown PATA cable.
2094 * @ap: port
2095 *
2096 * Helper method for drivers which have no PATA cable detection.
2097 */
2098
2099int ata_cable_unknown(struct ata_port *ap)
2100{
2101 return ATA_CBL_PATA_UNK;
2102}
2103
2104/**
2105 * ata_cable_sata - return SATA cable type
2106 * @ap: port
2107 *
2108 * Helper method for drivers which have SATA cables
2109 */
2110
2111int ata_cable_sata(struct ata_port *ap)
2112{
2113 return ATA_CBL_SATA;
2114}
2115
1da177e4
LT
2116/**
2117 * ata_bus_probe - Reset and probe ATA bus
2118 * @ap: Bus to probe
2119 *
0cba632b
JG
2120 * Master ATA bus probing function. Initiates a hardware-dependent
2121 * bus reset, then attempts to identify any devices found on
2122 * the bus.
2123 *
1da177e4 2124 * LOCKING:
0cba632b 2125 * PCI/etc. bus probe sem.
1da177e4
LT
2126 *
2127 * RETURNS:
96072e69 2128 * Zero on success, negative errno otherwise.
1da177e4
LT
2129 */
2130
80289167 2131int ata_bus_probe(struct ata_port *ap)
1da177e4 2132{
28ca5c57 2133 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2134 int tries[ATA_MAX_DEVICES];
f58229f8 2135 int rc;
e82cbdb9 2136 struct ata_device *dev;
1da177e4 2137
28ca5c57 2138 ata_port_probe(ap);
c19ba8af 2139
f58229f8
TH
2140 ata_link_for_each_dev(dev, &ap->link)
2141 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2142
2143 retry:
2044470c 2144 /* reset and determine device classes */
52783c5d 2145 ap->ops->phy_reset(ap);
2061a47a 2146
f58229f8 2147 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2148 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2149 dev->class != ATA_DEV_UNKNOWN)
2150 classes[dev->devno] = dev->class;
2151 else
2152 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2153
52783c5d 2154 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2155 }
1da177e4 2156
52783c5d 2157 ata_port_probe(ap);
2044470c 2158
b6079ca4
AC
2159 /* after the reset the device state is PIO 0 and the controller
2160 state is undefined. Record the mode */
2161
f58229f8
TH
2162 ata_link_for_each_dev(dev, &ap->link)
2163 dev->pio_mode = XFER_PIO_0;
b6079ca4 2164
f31f0cc2
JG
2165 /* read IDENTIFY page and configure devices. We have to do the identify
2166 specific sequence bass-ackwards so that PDIAG- is released by
2167 the slave device */
2168
f58229f8
TH
2169 ata_link_for_each_dev(dev, &ap->link) {
2170 if (tries[dev->devno])
2171 dev->class = classes[dev->devno];
ffeae418 2172
14d2bac1 2173 if (!ata_dev_enabled(dev))
ffeae418 2174 continue;
ffeae418 2175
bff04647
TH
2176 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2177 dev->id);
14d2bac1
TH
2178 if (rc)
2179 goto fail;
f31f0cc2
JG
2180 }
2181
be0d18df
AC
2182 /* Now ask for the cable type as PDIAG- should have been released */
2183 if (ap->ops->cable_detect)
2184 ap->cbl = ap->ops->cable_detect(ap);
2185
614fe29b
AC
2186 /* We may have SATA bridge glue hiding here irrespective of the
2187 reported cable types and sensed types */
2188 ata_link_for_each_dev(dev, &ap->link) {
2189 if (!ata_dev_enabled(dev))
2190 continue;
2191 /* SATA drives indicate we have a bridge. We don't know which
2192 end of the link the bridge is which is a problem */
2193 if (ata_id_is_sata(dev->id))
2194 ap->cbl = ATA_CBL_SATA;
2195 }
2196
f31f0cc2
JG
2197 /* After the identify sequence we can now set up the devices. We do
2198 this in the normal order so that the user doesn't get confused */
2199
f58229f8 2200 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2201 if (!ata_dev_enabled(dev))
2202 continue;
14d2bac1 2203
9af5c9c9 2204 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2205 rc = ata_dev_configure(dev);
9af5c9c9 2206 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2207 if (rc)
2208 goto fail;
1da177e4
LT
2209 }
2210
e82cbdb9 2211 /* configure transfer mode */
0260731f 2212 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2213 if (rc)
51713d35 2214 goto fail;
1da177e4 2215
f58229f8
TH
2216 ata_link_for_each_dev(dev, &ap->link)
2217 if (ata_dev_enabled(dev))
e82cbdb9 2218 return 0;
1da177e4 2219
e82cbdb9
TH
2220 /* no device present, disable port */
2221 ata_port_disable(ap);
96072e69 2222 return -ENODEV;
14d2bac1
TH
2223
2224 fail:
4ae72a1e
TH
2225 tries[dev->devno]--;
2226
14d2bac1
TH
2227 switch (rc) {
2228 case -EINVAL:
4ae72a1e 2229 /* eeek, something went very wrong, give up */
14d2bac1
TH
2230 tries[dev->devno] = 0;
2231 break;
4ae72a1e
TH
2232
2233 case -ENODEV:
2234 /* give it just one more chance */
2235 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2236 case -EIO:
4ae72a1e
TH
2237 if (tries[dev->devno] == 1) {
2238 /* This is the last chance, better to slow
2239 * down than lose it.
2240 */
936fd732 2241 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2242 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2243 }
14d2bac1
TH
2244 }
2245
4ae72a1e 2246 if (!tries[dev->devno])
3373efd8 2247 ata_dev_disable(dev);
ec573755 2248
14d2bac1 2249 goto retry;
1da177e4
LT
2250}
2251
2252/**
0cba632b
JG
2253 * ata_port_probe - Mark port as enabled
2254 * @ap: Port for which we indicate enablement
1da177e4 2255 *
0cba632b
JG
2256 * Modify @ap data structure such that the system
2257 * thinks that the entire port is enabled.
2258 *
cca3974e 2259 * LOCKING: host lock, or some other form of
0cba632b 2260 * serialization.
1da177e4
LT
2261 */
2262
2263void ata_port_probe(struct ata_port *ap)
2264{
198e0fed 2265 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2266}
2267
3be680b7
TH
2268/**
2269 * sata_print_link_status - Print SATA link status
936fd732 2270 * @link: SATA link to printk link status about
3be680b7
TH
2271 *
2272 * This function prints link speed and status of a SATA link.
2273 *
2274 * LOCKING:
2275 * None.
2276 */
936fd732 2277void sata_print_link_status(struct ata_link *link)
3be680b7 2278{
6d5f9732 2279 u32 sstatus, scontrol, tmp;
3be680b7 2280
936fd732 2281 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2282 return;
936fd732 2283 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2284
936fd732 2285 if (ata_link_online(link)) {
3be680b7 2286 tmp = (sstatus >> 4) & 0xf;
936fd732 2287 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2288 "SATA link up %s (SStatus %X SControl %X)\n",
2289 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2290 } else {
936fd732 2291 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2292 "SATA link down (SStatus %X SControl %X)\n",
2293 sstatus, scontrol);
3be680b7
TH
2294 }
2295}
2296
1da177e4 2297/**
780a87f7
JG
2298 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2299 * @ap: SATA port associated with target SATA PHY.
1da177e4 2300 *
780a87f7
JG
2301 * This function issues commands to standard SATA Sxxx
2302 * PHY registers, to wake up the phy (and device), and
2303 * clear any reset condition.
1da177e4
LT
2304 *
2305 * LOCKING:
0cba632b 2306 * PCI/etc. bus probe sem.
1da177e4
LT
2307 *
2308 */
2309void __sata_phy_reset(struct ata_port *ap)
2310{
936fd732 2311 struct ata_link *link = &ap->link;
1da177e4 2312 unsigned long timeout = jiffies + (HZ * 5);
936fd732 2313 u32 sstatus;
1da177e4
LT
2314
2315 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2316 /* issue phy wake/reset */
936fd732 2317 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
62ba2841
TH
2318 /* Couldn't find anything in SATA I/II specs, but
2319 * AHCI-1.1 10.4.2 says at least 1 ms. */
2320 mdelay(1);
1da177e4 2321 }
81952c54 2322 /* phy wake/clear reset */
936fd732 2323 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
1da177e4
LT
2324
2325 /* wait for phy to become ready, if necessary */
2326 do {
2327 msleep(200);
936fd732 2328 sata_scr_read(link, SCR_STATUS, &sstatus);
1da177e4
LT
2329 if ((sstatus & 0xf) != 1)
2330 break;
2331 } while (time_before(jiffies, timeout));
2332
3be680b7 2333 /* print link status */
936fd732 2334 sata_print_link_status(link);
656563e3 2335
3be680b7 2336 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 2337 if (!ata_link_offline(link))
1da177e4 2338 ata_port_probe(ap);
3be680b7 2339 else
1da177e4 2340 ata_port_disable(ap);
1da177e4 2341
198e0fed 2342 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2343 return;
2344
2345 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2346 ata_port_disable(ap);
2347 return;
2348 }
2349
2350 ap->cbl = ATA_CBL_SATA;
2351}
2352
2353/**
780a87f7
JG
2354 * sata_phy_reset - Reset SATA bus.
2355 * @ap: SATA port associated with target SATA PHY.
1da177e4 2356 *
780a87f7
JG
2357 * This function resets the SATA bus, and then probes
2358 * the bus for devices.
1da177e4
LT
2359 *
2360 * LOCKING:
0cba632b 2361 * PCI/etc. bus probe sem.
1da177e4
LT
2362 *
2363 */
2364void sata_phy_reset(struct ata_port *ap)
2365{
2366 __sata_phy_reset(ap);
198e0fed 2367 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2368 return;
2369 ata_bus_reset(ap);
2370}
2371
ebdfca6e
AC
2372/**
2373 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2374 * @adev: device
2375 *
2376 * Obtain the other device on the same cable, or if none is
2377 * present NULL is returned
2378 */
2e9edbf8 2379
3373efd8 2380struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2381{
9af5c9c9
TH
2382 struct ata_link *link = adev->link;
2383 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2384 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2385 return NULL;
2386 return pair;
2387}
2388
1da177e4 2389/**
780a87f7
JG
2390 * ata_port_disable - Disable port.
2391 * @ap: Port to be disabled.
1da177e4 2392 *
780a87f7
JG
2393 * Modify @ap data structure such that the system
2394 * thinks that the entire port is disabled, and should
2395 * never attempt to probe or communicate with devices
2396 * on this port.
2397 *
cca3974e 2398 * LOCKING: host lock, or some other form of
780a87f7 2399 * serialization.
1da177e4
LT
2400 */
2401
2402void ata_port_disable(struct ata_port *ap)
2403{
9af5c9c9
TH
2404 ap->link.device[0].class = ATA_DEV_NONE;
2405 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2406 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2407}
2408
1c3fae4d 2409/**
3c567b7d 2410 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2411 * @link: Link to adjust SATA spd limit for
1c3fae4d 2412 *
936fd732 2413 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2414 * function only adjusts the limit. The change must be applied
3c567b7d 2415 * using sata_set_spd().
1c3fae4d
TH
2416 *
2417 * LOCKING:
2418 * Inherited from caller.
2419 *
2420 * RETURNS:
2421 * 0 on success, negative errno on failure
2422 */
936fd732 2423int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2424{
81952c54
TH
2425 u32 sstatus, spd, mask;
2426 int rc, highbit;
1c3fae4d 2427
936fd732 2428 if (!sata_scr_valid(link))
008a7896
TH
2429 return -EOPNOTSUPP;
2430
2431 /* If SCR can be read, use it to determine the current SPD.
936fd732 2432 * If not, use cached value in link->sata_spd.
008a7896 2433 */
936fd732 2434 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2435 if (rc == 0)
2436 spd = (sstatus >> 4) & 0xf;
2437 else
936fd732 2438 spd = link->sata_spd;
1c3fae4d 2439
936fd732 2440 mask = link->sata_spd_limit;
1c3fae4d
TH
2441 if (mask <= 1)
2442 return -EINVAL;
008a7896
TH
2443
2444 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2445 highbit = fls(mask) - 1;
2446 mask &= ~(1 << highbit);
2447
008a7896
TH
2448 /* Mask off all speeds higher than or equal to the current
2449 * one. Force 1.5Gbps if current SPD is not available.
2450 */
2451 if (spd > 1)
2452 mask &= (1 << (spd - 1)) - 1;
2453 else
2454 mask &= 1;
2455
2456 /* were we already at the bottom? */
1c3fae4d
TH
2457 if (!mask)
2458 return -EINVAL;
2459
936fd732 2460 link->sata_spd_limit = mask;
1c3fae4d 2461
936fd732 2462 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2463 sata_spd_string(fls(mask)));
1c3fae4d
TH
2464
2465 return 0;
2466}
2467
936fd732 2468static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2469{
2470 u32 spd, limit;
2471
936fd732 2472 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2473 limit = 0;
2474 else
936fd732 2475 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2476
2477 spd = (*scontrol >> 4) & 0xf;
2478 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2479
2480 return spd != limit;
2481}
2482
2483/**
3c567b7d 2484 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2485 * @link: Link in question
1c3fae4d
TH
2486 *
2487 * Test whether the spd limit in SControl matches
936fd732 2488 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2489 * whether hardreset is necessary to apply SATA spd
2490 * configuration.
2491 *
2492 * LOCKING:
2493 * Inherited from caller.
2494 *
2495 * RETURNS:
2496 * 1 if SATA spd configuration is needed, 0 otherwise.
2497 */
936fd732 2498int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2499{
2500 u32 scontrol;
2501
936fd732 2502 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2503 return 0;
2504
936fd732 2505 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2506}
2507
2508/**
3c567b7d 2509 * sata_set_spd - set SATA spd according to spd limit
936fd732 2510 * @link: Link to set SATA spd for
1c3fae4d 2511 *
936fd732 2512 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2513 *
2514 * LOCKING:
2515 * Inherited from caller.
2516 *
2517 * RETURNS:
2518 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2519 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2520 */
936fd732 2521int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2522{
2523 u32 scontrol;
81952c54 2524 int rc;
1c3fae4d 2525
936fd732 2526 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2527 return rc;
1c3fae4d 2528
936fd732 2529 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2530 return 0;
2531
936fd732 2532 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2533 return rc;
2534
1c3fae4d
TH
2535 return 1;
2536}
2537
452503f9
AC
2538/*
2539 * This mode timing computation functionality is ported over from
2540 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2541 */
2542/*
b352e57d 2543 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2544 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2545 * for UDMA6, which is currently supported only by Maxtor drives.
2546 *
2547 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2548 */
2549
2550static const struct ata_timing ata_timing[] = {
2551
2552 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2553 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2554 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2555 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2556
b352e57d
AC
2557 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2558 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2559 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2560 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2561 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2562
2563/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2564
452503f9
AC
2565 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2566 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2567 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2568
452503f9
AC
2569 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2570 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2571 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2572
b352e57d
AC
2573 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2574 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2575 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2576 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2577
2578 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2579 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2580 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2581
2582/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2583
2584 { 0xFF }
2585};
2586
2587#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2588#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2589
2590static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2591{
2592 q->setup = EZ(t->setup * 1000, T);
2593 q->act8b = EZ(t->act8b * 1000, T);
2594 q->rec8b = EZ(t->rec8b * 1000, T);
2595 q->cyc8b = EZ(t->cyc8b * 1000, T);
2596 q->active = EZ(t->active * 1000, T);
2597 q->recover = EZ(t->recover * 1000, T);
2598 q->cycle = EZ(t->cycle * 1000, T);
2599 q->udma = EZ(t->udma * 1000, UT);
2600}
2601
2602void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2603 struct ata_timing *m, unsigned int what)
2604{
2605 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2606 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2607 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2608 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2609 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2610 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2611 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2612 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2613}
2614
2615static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2616{
2617 const struct ata_timing *t;
2618
2619 for (t = ata_timing; t->mode != speed; t++)
91190758 2620 if (t->mode == 0xFF)
452503f9 2621 return NULL;
2e9edbf8 2622 return t;
452503f9
AC
2623}
2624
2625int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2626 struct ata_timing *t, int T, int UT)
2627{
2628 const struct ata_timing *s;
2629 struct ata_timing p;
2630
2631 /*
2e9edbf8 2632 * Find the mode.
75b1f2f8 2633 */
452503f9
AC
2634
2635 if (!(s = ata_timing_find_mode(speed)))
2636 return -EINVAL;
2637
75b1f2f8
AL
2638 memcpy(t, s, sizeof(*s));
2639
452503f9
AC
2640 /*
2641 * If the drive is an EIDE drive, it can tell us it needs extended
2642 * PIO/MW_DMA cycle timing.
2643 */
2644
2645 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2646 memset(&p, 0, sizeof(p));
2647 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2648 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2649 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2650 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2651 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2652 }
2653 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2654 }
2655
2656 /*
2657 * Convert the timing to bus clock counts.
2658 */
2659
75b1f2f8 2660 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2661
2662 /*
c893a3ae
RD
2663 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2664 * S.M.A.R.T * and some other commands. We have to ensure that the
2665 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2666 */
2667
fd3367af 2668 if (speed > XFER_PIO_6) {
452503f9
AC
2669 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2670 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2671 }
2672
2673 /*
c893a3ae 2674 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2675 */
2676
2677 if (t->act8b + t->rec8b < t->cyc8b) {
2678 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2679 t->rec8b = t->cyc8b - t->act8b;
2680 }
2681
2682 if (t->active + t->recover < t->cycle) {
2683 t->active += (t->cycle - (t->active + t->recover)) / 2;
2684 t->recover = t->cycle - t->active;
2685 }
a617c09f 2686
4f701d1e
AC
2687 /* In a few cases quantisation may produce enough errors to
2688 leave t->cycle too low for the sum of active and recovery
2689 if so we must correct this */
2690 if (t->active + t->recover > t->cycle)
2691 t->cycle = t->active + t->recover;
452503f9
AC
2692
2693 return 0;
2694}
2695
cf176e1a
TH
2696/**
2697 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2698 * @dev: Device to adjust xfer masks
458337db 2699 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2700 *
2701 * Adjust xfer masks of @dev downward. Note that this function
2702 * does not apply the change. Invoking ata_set_mode() afterwards
2703 * will apply the limit.
2704 *
2705 * LOCKING:
2706 * Inherited from caller.
2707 *
2708 * RETURNS:
2709 * 0 on success, negative errno on failure
2710 */
458337db 2711int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2712{
458337db
TH
2713 char buf[32];
2714 unsigned int orig_mask, xfer_mask;
2715 unsigned int pio_mask, mwdma_mask, udma_mask;
2716 int quiet, highbit;
cf176e1a 2717
458337db
TH
2718 quiet = !!(sel & ATA_DNXFER_QUIET);
2719 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2720
458337db
TH
2721 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2722 dev->mwdma_mask,
2723 dev->udma_mask);
2724 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2725
458337db
TH
2726 switch (sel) {
2727 case ATA_DNXFER_PIO:
2728 highbit = fls(pio_mask) - 1;
2729 pio_mask &= ~(1 << highbit);
2730 break;
2731
2732 case ATA_DNXFER_DMA:
2733 if (udma_mask) {
2734 highbit = fls(udma_mask) - 1;
2735 udma_mask &= ~(1 << highbit);
2736 if (!udma_mask)
2737 return -ENOENT;
2738 } else if (mwdma_mask) {
2739 highbit = fls(mwdma_mask) - 1;
2740 mwdma_mask &= ~(1 << highbit);
2741 if (!mwdma_mask)
2742 return -ENOENT;
2743 }
2744 break;
2745
2746 case ATA_DNXFER_40C:
2747 udma_mask &= ATA_UDMA_MASK_40C;
2748 break;
2749
2750 case ATA_DNXFER_FORCE_PIO0:
2751 pio_mask &= 1;
2752 case ATA_DNXFER_FORCE_PIO:
2753 mwdma_mask = 0;
2754 udma_mask = 0;
2755 break;
2756
458337db
TH
2757 default:
2758 BUG();
2759 }
2760
2761 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2762
2763 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2764 return -ENOENT;
2765
2766 if (!quiet) {
2767 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2768 snprintf(buf, sizeof(buf), "%s:%s",
2769 ata_mode_string(xfer_mask),
2770 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2771 else
2772 snprintf(buf, sizeof(buf), "%s",
2773 ata_mode_string(xfer_mask));
2774
2775 ata_dev_printk(dev, KERN_WARNING,
2776 "limiting speed to %s\n", buf);
2777 }
cf176e1a
TH
2778
2779 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2780 &dev->udma_mask);
2781
cf176e1a 2782 return 0;
cf176e1a
TH
2783}
2784
3373efd8 2785static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2786{
9af5c9c9 2787 struct ata_eh_context *ehc = &dev->link->eh_context;
83206a29
TH
2788 unsigned int err_mask;
2789 int rc;
1da177e4 2790
e8384607 2791 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2792 if (dev->xfer_shift == ATA_SHIFT_PIO)
2793 dev->flags |= ATA_DFLAG_PIO;
2794
3373efd8 2795 err_mask = ata_dev_set_xfermode(dev);
11750a40
AC
2796 /* Old CFA may refuse this command, which is just fine */
2797 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2798 err_mask &= ~AC_ERR_DEV;
0bc2a79a
AC
2799 /* Some very old devices and some bad newer ones fail any kind of
2800 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2801 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2802 dev->pio_mode <= XFER_PIO_2)
2803 err_mask &= ~AC_ERR_DEV;
83206a29 2804 if (err_mask) {
f15a1daf
TH
2805 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2806 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2807 return -EIO;
2808 }
1da177e4 2809
baa1e78a 2810 ehc->i.flags |= ATA_EHI_POST_SETMODE;
3373efd8 2811 rc = ata_dev_revalidate(dev, 0);
baa1e78a 2812 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2813 if (rc)
83206a29 2814 return rc;
48a8a14f 2815
23e71c3d
TH
2816 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2817 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2818
f15a1daf
TH
2819 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2820 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2821 return 0;
1da177e4
LT
2822}
2823
1da177e4 2824/**
04351821 2825 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2826 * @link: link on which timings will be programmed
e82cbdb9 2827 * @r_failed_dev: out paramter for failed device
1da177e4 2828 *
04351821
AC
2829 * Standard implementation of the function used to tune and set
2830 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2831 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2832 * returned in @r_failed_dev.
780a87f7 2833 *
1da177e4 2834 * LOCKING:
0cba632b 2835 * PCI/etc. bus probe sem.
e82cbdb9
TH
2836 *
2837 * RETURNS:
2838 * 0 on success, negative errno otherwise
1da177e4 2839 */
04351821 2840
0260731f 2841int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 2842{
0260731f 2843 struct ata_port *ap = link->ap;
e8e0619f 2844 struct ata_device *dev;
f58229f8 2845 int rc = 0, used_dma = 0, found = 0;
3adcebb2 2846
a6d5a51c 2847 /* step 1: calculate xfer_mask */
f58229f8 2848 ata_link_for_each_dev(dev, link) {
acf356b1 2849 unsigned int pio_mask, dma_mask;
a6d5a51c 2850
e1211e3f 2851 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2852 continue;
2853
3373efd8 2854 ata_dev_xfermask(dev);
1da177e4 2855
acf356b1
TH
2856 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2857 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2858 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2859 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2860
4f65977d 2861 found = 1;
5444a6f4
AC
2862 if (dev->dma_mode)
2863 used_dma = 1;
a6d5a51c 2864 }
4f65977d 2865 if (!found)
e82cbdb9 2866 goto out;
a6d5a51c
TH
2867
2868 /* step 2: always set host PIO timings */
f58229f8 2869 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2870 if (!ata_dev_enabled(dev))
2871 continue;
2872
2873 if (!dev->pio_mode) {
f15a1daf 2874 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2875 rc = -EINVAL;
e82cbdb9 2876 goto out;
e8e0619f
TH
2877 }
2878
2879 dev->xfer_mode = dev->pio_mode;
2880 dev->xfer_shift = ATA_SHIFT_PIO;
2881 if (ap->ops->set_piomode)
2882 ap->ops->set_piomode(ap, dev);
2883 }
1da177e4 2884
a6d5a51c 2885 /* step 3: set host DMA timings */
f58229f8 2886 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2887 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2888 continue;
2889
2890 dev->xfer_mode = dev->dma_mode;
2891 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2892 if (ap->ops->set_dmamode)
2893 ap->ops->set_dmamode(ap, dev);
2894 }
1da177e4
LT
2895
2896 /* step 4: update devices' xfer mode */
f58229f8 2897 ata_link_for_each_dev(dev, link) {
18d90deb 2898 /* don't update suspended devices' xfer mode */
9666f400 2899 if (!ata_dev_enabled(dev))
83206a29
TH
2900 continue;
2901
3373efd8 2902 rc = ata_dev_set_mode(dev);
5bbc53f4 2903 if (rc)
e82cbdb9 2904 goto out;
83206a29 2905 }
1da177e4 2906
e8e0619f
TH
2907 /* Record simplex status. If we selected DMA then the other
2908 * host channels are not permitted to do so.
5444a6f4 2909 */
cca3974e 2910 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2911 ap->host->simplex_claimed = ap;
5444a6f4 2912
e82cbdb9
TH
2913 out:
2914 if (rc)
2915 *r_failed_dev = dev;
2916 return rc;
1da177e4
LT
2917}
2918
04351821
AC
2919/**
2920 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2921 * @link: link on which timings will be programmed
04351821
AC
2922 * @r_failed_dev: out paramter for failed device
2923 *
2924 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2925 * ata_set_mode() fails, pointer to the failing device is
2926 * returned in @r_failed_dev.
2927 *
2928 * LOCKING:
2929 * PCI/etc. bus probe sem.
2930 *
2931 * RETURNS:
2932 * 0 on success, negative errno otherwise
2933 */
0260731f 2934int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 2935{
0260731f
TH
2936 struct ata_port *ap = link->ap;
2937
04351821
AC
2938 /* has private set_mode? */
2939 if (ap->ops->set_mode)
0260731f
TH
2940 return ap->ops->set_mode(link, r_failed_dev);
2941 return ata_do_set_mode(link, r_failed_dev);
04351821
AC
2942}
2943
1fdffbce
JG
2944/**
2945 * ata_tf_to_host - issue ATA taskfile to host controller
2946 * @ap: port to which command is being issued
2947 * @tf: ATA taskfile register set
2948 *
2949 * Issues ATA taskfile register set to ATA host controller,
2950 * with proper synchronization with interrupt handler and
2951 * other threads.
2952 *
2953 * LOCKING:
cca3974e 2954 * spin_lock_irqsave(host lock)
1fdffbce
JG
2955 */
2956
2957static inline void ata_tf_to_host(struct ata_port *ap,
2958 const struct ata_taskfile *tf)
2959{
2960 ap->ops->tf_load(ap, tf);
2961 ap->ops->exec_command(ap, tf);
2962}
2963
1da177e4
LT
2964/**
2965 * ata_busy_sleep - sleep until BSY clears, or timeout
2966 * @ap: port containing status register to be polled
2967 * @tmout_pat: impatience timeout
2968 * @tmout: overall timeout
2969 *
780a87f7
JG
2970 * Sleep until ATA Status register bit BSY clears,
2971 * or a timeout occurs.
2972 *
d1adc1bb
TH
2973 * LOCKING:
2974 * Kernel thread context (may sleep).
2975 *
2976 * RETURNS:
2977 * 0 on success, -errno otherwise.
1da177e4 2978 */
d1adc1bb
TH
2979int ata_busy_sleep(struct ata_port *ap,
2980 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
2981{
2982 unsigned long timer_start, timeout;
2983 u8 status;
2984
2985 status = ata_busy_wait(ap, ATA_BUSY, 300);
2986 timer_start = jiffies;
2987 timeout = timer_start + tmout_pat;
d1adc1bb
TH
2988 while (status != 0xff && (status & ATA_BUSY) &&
2989 time_before(jiffies, timeout)) {
1da177e4
LT
2990 msleep(50);
2991 status = ata_busy_wait(ap, ATA_BUSY, 3);
2992 }
2993
d1adc1bb 2994 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 2995 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
2996 "port is slow to respond, please be patient "
2997 "(Status 0x%x)\n", status);
1da177e4
LT
2998
2999 timeout = timer_start + tmout;
d1adc1bb
TH
3000 while (status != 0xff && (status & ATA_BUSY) &&
3001 time_before(jiffies, timeout)) {
1da177e4
LT
3002 msleep(50);
3003 status = ata_chk_status(ap);
3004 }
3005
d1adc1bb
TH
3006 if (status == 0xff)
3007 return -ENODEV;
3008
1da177e4 3009 if (status & ATA_BUSY) {
f15a1daf 3010 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3011 "(%lu secs, Status 0x%x)\n",
3012 tmout / HZ, status);
d1adc1bb 3013 return -EBUSY;
1da177e4
LT
3014 }
3015
3016 return 0;
3017}
3018
/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or @deadline
 *	passes.  Polls every 50ms.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 when BSY cleared, -ENODEV if the link is offline and the
 *	status bus floats to 0xff (nothing attached), -EBUSY on
 *	timeout.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		/* floating bus (0xff) + offline link => no device */
		if (!ata_link_online(&ap->link) && status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		/* warn once if we've waited >5s and >3s still remain
		 * before the deadline */
		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
3060
/* Wait for the devices found by ata_devchk (bits in @devmask) to come
 * out of reset, then reselect device 0.  Returns 0 on success, other
 * -errno on hard failure; a device that disappeared (-ENODEV) is
 * remembered but does not abort probing of the other device.
 */
static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			/* record -ENODEV but keep going for device 1 */
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for register
	 * access briefly, then wait for BSY to clear.
	 */
	if (dev1) {
		int i;

		ap->ops->dev_select(ap, 1);

		/* Wait for register access.  Some ATAPI devices fail
		 * to set nsect/lbal after reset, so don't waste too
		 * much time on it.  We're gonna wait for !BSY anyway.
		 */
		for (i = 0; i < 2; i++) {
			u8 nsect, lbal;

			nsect = ioread8(ioaddr->nsect_addr);
			lbal = ioread8(ioaddr->lbal_addr);
			/* nsect == lbal == 1 is the post-reset signature */
			if ((nsect == 1) && (lbal == 1))
				break;
			msleep(50);	/* give drive a breather */
		}

		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}
3120
/* Pulse SRST in the device control register, then wait for the
 * devices in @devmask to become ready (honouring @deadline).
 * Returns 0 on success, -ENODEV if the status bus floats to 0xFF
 * (nothing attached), other -errno on failure.
 */
static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2mS rule and then waits for ready.
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}
3156
/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_device *device = ap->link.device;
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;	/* SATA: assume device 0 exists */
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset; a missing device (-ENODEV) is tolerated */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* is double-select really necessary? */
	if (device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((device[0].class == ATA_DEV_NONE) &&
	    (device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ata_port_disable(ap);

	DPRINTK("EXIT\n");
}
3244
/**
 *	sata_link_debounce - debounce SATA phy status
 *	@link: ATA link to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @link reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  Timeout constraints the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -EPIPE if debouncing failed before the deadline,
 *	other -errno on SCR access failure.
 */
int sata_link_debounce(struct ata_link *link, const unsigned long *params,
		       unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	/* effective deadline is the sooner of @timeout and @deadline */
	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;	/* only the DET field is of interest */

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			/* DET == 1 only counts as stable once the
			 * deadline has passed (stuck-DET workaround) */
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* Check deadline.  If debouncing failed, return
		 * -EPIPE to tell upper layer to lower link speed.
		 */
		if (time_after(jiffies, deadline))
			return -EPIPE;
	}
}
3313
/**
 *	sata_link_resume - resume SATA link
 *	@link: ATA link to resume SATA
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy @link and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_link_resume(struct ata_link *link, const unsigned long *params,
		     unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		return rc;

	/* clear DET (bring phy out of reset/offline); IPM = 3
	 * disallows partial/slumber power-management transitions */
	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_link_debounce(link, params, deadline);
}
3349
/**
 *	ata_std_prereset - prepare for reset
 *	@link: ATA link to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@link is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort whole reset sequence and give up
 *	that port, so prereset should be best-effort.  It does its
 *	best to prepare for reset sequence but if things go wrong, it
 *	should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct ata_eh_context *ehc = &link->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume: some controllers need a hardreset to
	 * bring the link back up */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (link->flags & ATA_LFLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume link */
	if (ap->flags & ATA_FLAG_SATA) {
		rc = sata_link_resume(link, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_link_printk(link, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
		rc = ata_wait_ready(ap, deadline);
		/* -ENODEV is fine; escalate everything else to hardreset */
		if (rc && rc != -ENODEV) {
			ata_link_printk(link, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}
3406
/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@link: ATA link to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_link *link, unsigned int *classes,
		      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	/* offline link: nothing to reset, report no device */
	if (ata_link_offline(link)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
		ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices;
	 * diagnostic code 0x81 indicates device 1 failed, so skip it */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}
3464
/**
 *	sata_link_hardreset - reset link via SATA phy reset
 *	@link: link to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset @link using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(link)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
			goto out;

		/* DET = 4: disable the phy while changing speed */
		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(link);
	}

	/* issue phy wake/reset: DET = 1 requests COMRESET */
	if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring link back */
	rc = sata_link_resume(link, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}
3524
/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@link: link to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_link *link, unsigned int *class,
		       unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_link_hardreset(link, timing, deadline);
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_link_offline(link)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_link_printk(link, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}
3582
/**
 *	ata_std_postreset - standard postreset callback
 *	@link: the target ata_link
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_link *link, unsigned int *classes)
{
	struct ata_port *ap = link->ap;
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(link);

	/* clear SError accumulated during the reset (write-1-to-clear) */
	if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
		sata_scr_write(link, SCR_ERROR, serror);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3627
/**
 *	ata_dev_same_device - Determine whether new ID matches configured device
 *	@dev: device to compare against
 *	@new_class: class of the new device
 *	@new_id: IDENTIFY page of the new device
 *
 *	Compare @new_class and @new_id against @dev and determine
 *	whether @dev is the device indicated by @new_class and
 *	@new_id.  Class, model number and serial number must all
 *	match.
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	1 if @dev matches @new_class and @new_id, 0 otherwise.
 */
static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
			       const u16 *new_id)
{
	const u16 *old_id = dev->id;
	/* index 0 holds the currently-configured strings, 1 the new ones */
	unsigned char model[2][ATA_ID_PROD_LEN + 1];
	unsigned char serial[2][ATA_ID_SERNO_LEN + 1];

	if (dev->class != new_class) {
		ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
			       dev->class, new_class);
		return 0;
	}

	ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
	ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
	ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
	ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));

	if (strcmp(model[0], model[1])) {
		ata_dev_printk(dev, KERN_INFO, "model number mismatch "
			       "'%s' != '%s'\n", model[0], model[1]);
		return 0;
	}

	if (strcmp(serial[0], serial[1])) {
		ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
			       "'%s' != '%s'\n", serial[0], serial[1]);
		return 0;
	}

	return 1;
}
3676
/**
 *	ata_dev_reread_id - Re-read IDENTIFY data
 *	@dev: target ATA device
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page and make sure @dev is still attached to
 *	the port.  On success dev->id is updated with the freshly read
 *	data.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
{
	unsigned int class = dev->class;
	/* use the port's scratch buffer so dev->id stays intact on failure */
	u16 *id = (void *)dev->link->ap->sector_buf;
	int rc;

	/* read ID data */
	rc = ata_dev_read_id(dev, &class, readid_flags, id);
	if (rc)
		return rc;

	/* is the device still there? */
	if (!ata_dev_same_device(dev, class, id))
		return -ENODEV;

	memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
	return 0;
}
3709
/**
 *	ata_dev_revalidate - Revalidate ATA device
 *	@dev: device to revalidate
 *	@readid_flags: read ID flags
 *
 *	Re-read IDENTIFY page, make sure @dev is still attached to the
 *	port and reconfigure it according to the new IDENTIFY page.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
{
	u64 n_sectors = dev->n_sectors;	/* remember pre-revalidation size */
	int rc;

	if (!ata_dev_enabled(dev))
		return -ENODEV;

	/* re-read ID */
	rc = ata_dev_reread_id(dev, readid_flags);
	if (rc)
		goto fail;

	/* configure device according to the new ID */
	rc = ata_dev_configure(dev);
	if (rc)
		goto fail;

	/* verify n_sectors hasn't changed — a shrunk/grown disk means
	 * this is not the same device we configured earlier */
	if (dev->class == ATA_DEV_ATA && n_sectors &&
	    dev->n_sectors != n_sectors) {
		ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
			       "%llu != %llu\n",
			       (unsigned long long)n_sectors,
			       (unsigned long long)dev->n_sectors);

		/* restore original n_sectors */
		dev->n_sectors = n_sectors;

		rc = -ENODEV;
		goto fail;
	}

	return 0;

 fail:
	ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
	return rc;
}
3763
/* One blacklist entry: model number (mandatory), firmware revision
 * (NULL matches any revision) and the ATA_HORKAGE_* flags to apply.
 * Strings are matched verbatim against the IDENTIFY data, so they
 * must be byte-exact.
 */
struct ata_blacklist_entry {
	const char *model_num;
	const char *model_rev;
	unsigned long horkage;
};

static const struct ata_blacklist_entry ata_device_blacklist [] = {
	/* Devices with DMA related problems under Linux */
	{ "WDC AC11000H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC22100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32500H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC33100H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC31600H",	NULL,		ATA_HORKAGE_NODMA },
	{ "WDC AC32100H",	"24.09P07",	ATA_HORKAGE_NODMA },
	{ "WDC AC23200L",	"21.10N21",	ATA_HORKAGE_NODMA },
	{ "Compaq CRD-8241B",	NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8400B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8480B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-8482B",		NULL,		ATA_HORKAGE_NODMA },
	{ "CRD-84",		NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B",	NULL,		ATA_HORKAGE_NODMA },
	{ "SanDisk SDP3B-64",	NULL,		ATA_HORKAGE_NODMA },
	{ "SANYO CD-ROM CRD",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8335",	NULL,		ATA_HORKAGE_NODMA },
	{ "HITACHI CDR-8435",	NULL,		ATA_HORKAGE_NODMA },
	{ "Toshiba CD-ROM XM-6202B", NULL,	ATA_HORKAGE_NODMA },
	{ "TOSHIBA CD-ROM XM-1702BC", NULL,	ATA_HORKAGE_NODMA },
	{ "CD-532E-A",		NULL,		ATA_HORKAGE_NODMA },
	{ "E-IDE CD-ROM CR-840",NULL,		ATA_HORKAGE_NODMA },
	{ "CD-ROM Drive/F5A",	NULL,		ATA_HORKAGE_NODMA },
	{ "WPI CDD-820",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC-148C", NULL,	ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SC",	NULL,		ATA_HORKAGE_NODMA },
	{ "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
	{ "_NEC DV5800A",	NULL,		ATA_HORKAGE_NODMA },
	{ "SAMSUNG CD-ROM SN-124","N001",	ATA_HORKAGE_NODMA },
	{ "Seagate STT20000A", NULL,		ATA_HORKAGE_NODMA },
	{ "IOMEGA ZIP 250 ATAPI", NULL,		ATA_HORKAGE_NODMA }, /* temporary fix */
	{ "IOMEGA ZIP 250 ATAPI Floppy",
				NULL,		ATA_HORKAGE_NODMA },

	/* Weird ATAPI devices */
	{ "TORiSAN DVD-ROM DRD-N216", NULL,	ATA_HORKAGE_MAX_SEC_128 },

	/* Devices we expect to fail diagnostics */

	/* Devices where NCQ should be avoided */
	/* NCQ is slow */
	{ "WDC WD740ADFD-00",	NULL,		ATA_HORKAGE_NONCQ },
	/* http://thread.gmane.org/gmane.linux.ide/14907 */
	{ "FUJITSU MHT2060BH",	NULL,		ATA_HORKAGE_NONCQ },
	/* NCQ is broken */
	{ "Maxtor 6L250S0",	"BANC1G10",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 6B200M0",	"BANC1BM0",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 6B200M0",	"BANC1B10",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ, },
	{ "Maxtor 7B300S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
	{ "Maxtor 7V300F0",	"VA111630",	ATA_HORKAGE_NONCQ },
	{ "HITACHI HDS7250SASUN500G 0621KTAWSD", "K2AOAJ0AHITACHI",
		 ATA_HORKAGE_NONCQ },
	/* NCQ hard hangs device under heavier load, needs hard power cycle */
	{ "Maxtor 6B250S0",	"BANC1B70",	ATA_HORKAGE_NONCQ },
	/* Blacklist entries taken from Silicon Image 3124/3132
	   Windows driver .inf file - also several Linux problem reports */
	{ "HTS541060G9SA00",	"MB3OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541080G9SA00",	"MB4OC60D",	ATA_HORKAGE_NONCQ, },
	{ "HTS541010G9SA00",	"MBZOC60D",	ATA_HORKAGE_NONCQ, },
	/* Drives which do spurious command completion */
	{ "HTS541680J9SA00",	"SB2IC7EP",	ATA_HORKAGE_NONCQ, },
	{ "HTS541612J9SA00",	"SBDIC7JP",	ATA_HORKAGE_NONCQ, },
	{ "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
	{ "WDC WD740ADFD-00NLR1", NULL,		ATA_HORKAGE_NONCQ, },
	{ "FUJITSU MHV2080BH",	"00840028",	ATA_HORKAGE_NONCQ, },
	{ "ST9160821AS",	"3.CLF",	ATA_HORKAGE_NONCQ, },
	{ "ST3160812AS",	"3.AD",		ATA_HORKAGE_NONCQ, },
	{ "SAMSUNG HD401LJ",	"ZZ100-15",	ATA_HORKAGE_NONCQ, },

	/* devices which puke on READ_NATIVE_MAX */
	{ "HDS724040KLSA80",	"KFAOA20N",	ATA_HORKAGE_BROKEN_HPA, },
	{ "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
	{ "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
	{ "MAXTOR 6L080L4",	"A93.0500",	ATA_HORKAGE_BROKEN_HPA },

	/* End Marker — model_num == NULL terminates the scan */
	{ }
};
2e9edbf8 3851
75683fe7 3852static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 3853{
8bfa79fc
TH
3854 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3855 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3856 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3857
8bfa79fc
TH
3858 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3859 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3860
6919a0a6 3861 while (ad->model_num) {
8bfa79fc 3862 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3863 if (ad->model_rev == NULL)
3864 return ad->horkage;
8bfa79fc 3865 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3866 return ad->horkage;
f4b15fef 3867 }
6919a0a6 3868 ad++;
f4b15fef 3869 }
1da177e4
LT
3870 return 0;
3871}
3872
6919a0a6
AC
3873static int ata_dma_blacklisted(const struct ata_device *dev)
3874{
3875 /* We don't support polling DMA.
3876 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3877 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3878 */
9af5c9c9 3879 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
3880 (dev->flags & ATA_DFLAG_CDB_INTR))
3881 return 1;
75683fe7 3882 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
3883}
3884
/**
 *	ata_dev_xfermask - Compute supported xfermask of the given device
 *	@dev: Device to compute xfermask for
 *
 *	Compute supported xfermask of @dev and store it in
 *	dev->*_mask.  This function is responsible for applying all
 *	known limits including host controller limits, device
 *	blacklist, etc...  Each step below only ever narrows the mask.
 *
 *	LOCKING:
 *	None.
 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on a simplex host only the claiming port may use DMA */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if ((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_drive_40wire(dev->id) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		      ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				       "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
3965
1da177e4
LT
/**
 *	ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 *	@dev: Device to which command will be sent
 *
 *	Issue SET FEATURES - XFER MODE command to device @dev
 *	on port @ap.  The target transfer mode is taken from
 *	@dev->xfer_mode, which must already have been chosen.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */

static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	/* Some controllers and ATAPI devices show flaky interrupt
	 * behavior after setting xfer mode.  Use polling instead.
	 */
	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = SETFEATURES_XFER;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
	tf.protocol = ATA_PROT_NODATA;
	/* sector count register carries the desired transfer mode */
	tf.nsect = dev->xfer_mode;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4003
/**
 *	ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
 *	@dev: Device to which command will be sent
 *	@enable: Whether to enable or disable the feature
 *
 *	Issue SET FEATURES - SATA FEATURES command to device @dev
 *	on port @ap with sector count set to indicate Asynchronous
 *	Notification feature.  @enable is written to the feature
 *	register, so callers pass the appropriate enable/disable
 *	sub-command code.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* set up set-features taskfile */
	DPRINTK("set features - SATA features\n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_SET_FEATURES;
	tf.feature = enable;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	/* SATA_AN selects the Asynchronous Notification feature */
	tf.nsect = SATA_AN;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4039
8bf62ece
AL
/**
 *	ata_dev_init_params - Issue INIT DEV PARAMS command
 *	@dev: Device to which command will be sent
 *	@heads: Number of heads (taskfile parameter), valid range 1-16
 *	@sectors: Number of sectors (taskfile parameter), valid range 1-255
 *
 *	Program the device's CHS translation geometry.  A clean
 *	command abort from the device is treated as success because
 *	old or out-of-spec drives reject this command and we then
 *	continue with the geometry the drive reported.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, AC_ERR_* mask otherwise.
 */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors)
{
	struct ata_taskfile tf;
	unsigned int err_mask;

	/* Number of sectors per track 1-255. Number of heads 1-16 */
	if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
		return AC_ERR_INVALID;

	/* set up init dev params taskfile */
	DPRINTK("init dev params \n");

	ata_tf_init(dev, &tf);
	tf.command = ATA_CMD_INIT_DEV_PARAMS;
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf.protocol = ATA_PROT_NODATA;
	tf.nsect = sectors;
	tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
	/* A clean abort indicates an original or just out of spec drive
	   and we should continue as we issue the setup based on the
	   drive reported working geometry */
	if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
		err_mask = 0;

	DPRINTK("EXIT, err_mask=%x\n", err_mask);
	return err_mask;
}
4082
/**
 *	ata_sg_clean - Unmap DMA memory associated with command
 *	@qc: Command containing DMA memory to be released
 *
 *	Unmap all mapped DMA memory associated with this command,
 *	undo the pad-buffer trimming done at setup time, and, for
 *	reads, copy any padded tail bytes back into the caller's
 *	buffer.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* n_elem may be 0 if the whole sg was consumed by padding */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
4140
/**
 *	ata_fill_sg - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command.  Segments that
 *	straddle a 64K boundary are split, since the hardware's
 *	PRD entries cannot cross one.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			/* len 0x10000 is encoded as 0 per the PRD spec */
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
b9a4197e 4192
/**
 *	ata_fill_sg_dumb - Fill PCI IDE PRD table
 *	@qc: Metadata associated with taskfile to be transferred
 *
 *	Fill PCI IDE PRD (scatter-gather) table with segments
 *	associated with the current disk command. Perform the fill
 *	so that we avoid writing any length 64K records for
 *	controllers that don't follow the spec.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
				/* Some PATA chipsets like the CS5530 can't
				   cope with 0x0000 meaning 64K as the spec says.
				   Split the 64K chunk into two 32K PRD entries;
				   the second entry's length is written by the
				   flags_len store below. */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	/* mark the final PRD entry as end-of-table */
	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4254
1da177e4
LT
4255/**
4256 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4257 * @qc: Metadata associated with taskfile to check
4258 *
780a87f7
JG
4259 * Allow low-level driver to filter ATA PACKET commands, returning
4260 * a status indicating whether or not it is OK to use DMA for the
4261 * supplied PACKET command.
4262 *
1da177e4 4263 * LOCKING:
cca3974e 4264 * spin_lock_irqsave(host lock)
0cba632b 4265 *
1da177e4
LT
4266 * RETURNS: 0 when ATAPI DMA can be used
4267 * nonzero otherwise
4268 */
4269int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4270{
4271 struct ata_port *ap = qc->ap;
b9a4197e
TH
4272
4273 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4274 * few ATAPI devices choke on such DMA requests.
4275 */
4276 if (unlikely(qc->nbytes & 15))
4277 return 1;
6f23a31d 4278
1da177e4 4279 if (ap->ops->check_atapi_dma)
b9a4197e 4280 return ap->ops->check_atapi_dma(qc);
1da177e4 4281
b9a4197e 4282 return 0;
1da177e4 4283}
b9a4197e 4284
1da177e4
LT
4285/**
4286 * ata_qc_prep - Prepare taskfile for submission
4287 * @qc: Metadata associated with taskfile to be prepared
4288 *
780a87f7
JG
4289 * Prepare ATA taskfile for submission.
4290 *
1da177e4 4291 * LOCKING:
cca3974e 4292 * spin_lock_irqsave(host lock)
1da177e4
LT
4293 */
4294void ata_qc_prep(struct ata_queued_cmd *qc)
4295{
4296 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4297 return;
4298
4299 ata_fill_sg(qc);
4300}
4301
d26fc955
AC
4302/**
4303 * ata_dumb_qc_prep - Prepare taskfile for submission
4304 * @qc: Metadata associated with taskfile to be prepared
4305 *
4306 * Prepare ATA taskfile for submission.
4307 *
4308 * LOCKING:
4309 * spin_lock_irqsave(host lock)
4310 */
4311void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4312{
4313 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4314 return;
4315
4316 ata_fill_sg_dumb(qc);
4317}
4318
/* No-op qc_prep callback for drivers that need no PRD setup. */
void ata_noop_qc_prep(struct ata_queued_cmd *qc)
{
}
4320
0cba632b
JG
4321/**
4322 * ata_sg_init_one - Associate command with memory buffer
4323 * @qc: Command to be associated
4324 * @buf: Memory buffer
4325 * @buflen: Length of memory buffer, in bytes.
4326 *
4327 * Initialize the data-related elements of queued_cmd @qc
4328 * to point to a single memory buffer, @buf of byte length @buflen.
4329 *
4330 * LOCKING:
cca3974e 4331 * spin_lock_irqsave(host lock)
0cba632b
JG
4332 */
4333
1da177e4
LT
4334void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4335{
1da177e4
LT
4336 qc->flags |= ATA_QCFLAG_SINGLE;
4337
cedc9a47 4338 qc->__sg = &qc->sgent;
1da177e4 4339 qc->n_elem = 1;
cedc9a47 4340 qc->orig_n_elem = 1;
1da177e4 4341 qc->buf_virt = buf;
233277ca 4342 qc->nbytes = buflen;
1da177e4 4343
61c0596c 4344 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4345}
4346
0cba632b
JG
4347/**
4348 * ata_sg_init - Associate command with scatter-gather table.
4349 * @qc: Command to be associated
4350 * @sg: Scatter-gather table.
4351 * @n_elem: Number of elements in s/g table.
4352 *
4353 * Initialize the data-related elements of queued_cmd @qc
4354 * to point to a scatter-gather table @sg, containing @n_elem
4355 * elements.
4356 *
4357 * LOCKING:
cca3974e 4358 * spin_lock_irqsave(host lock)
0cba632b
JG
4359 */
4360
1da177e4
LT
4361void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4362 unsigned int n_elem)
4363{
4364 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4365 qc->__sg = sg;
1da177e4 4366 qc->n_elem = n_elem;
cedc9a47 4367 qc->orig_n_elem = n_elem;
1da177e4
LT
4368}
4369
/**
 *	ata_sg_setup_one - DMA-map the memory buffer associated with a command.
 *	@qc: Command with memory buffer to be mapped.
 *
 *	DMA-map the memory buffer associated with queued_cmd @qc.
 *	Transfers that do not end on a 32-bit boundary are lengthened
 *	by copying the tail into a per-tag pad buffer; the original
 *	sg entry is trimmed (and skipped entirely if padding consumed
 *	it all).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 */

static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, stage the tail bytes into the pad buffer;
		 * for reads they are copied back in ata_sg_clean() */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4438
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *	As in ata_sg_setup_one(), transfers that do not end on a 32-bit
 *	boundary are lengthened via a per-tag pad buffer and the last
 *	sg entry is trimmed (dropped from the mapping when fully
 *	consumed by padding).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the trimmed-away last entry from the mapping */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
4523
0baab86b 4524/**
c893a3ae 4525 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4526 * @buf: Buffer to swap
4527 * @buf_words: Number of 16-bit words in buffer.
4528 *
4529 * Swap halves of 16-bit words if needed to convert from
4530 * little-endian byte order to native cpu byte order, or
4531 * vice-versa.
4532 *
4533 * LOCKING:
6f0ef4fa 4534 * Inherited from caller.
0baab86b 4535 */
1da177e4
LT
4536void swap_buf_le16(u16 *buf, unsigned int buf_words)
4537{
4538#ifdef __BIG_ENDIAN
4539 unsigned int i;
4540
4541 for (i = 0; i < buf_words; i++)
4542 buf[i] = le16_to_cpu(buf[i]);
4543#endif /* __BIG_ENDIAN */
4544}
4545
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *	Transfers an even number of bytes with 16-bit I/O and then
 *	handles a trailing odd byte, if any, via a small aligned
 *	bounce word.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
4584
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO. Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_flags;

	/* keep local interrupts off around the whole PIO transfer */
	local_irq_save(irq_flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_flags);
}
4606
4607
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device,
 *	advancing the command's sg cursor.  Marks the HSM state
 *	HSM_ST_LAST when this is the final sector of the command.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> final HSM state after this xfer */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted -> advance to the next one */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 4664
07f6f7d0 4665/**
5a5dbd18 4666 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4667 * @qc: Command on going
4668 *
5a5dbd18 4669 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4670 * ATA device for the DRQ request.
4671 *
4672 * LOCKING:
4673 * Inherited from caller.
4674 */
1da177e4 4675
07f6f7d0
AL
4676static void ata_pio_sectors(struct ata_queued_cmd *qc)
4677{
4678 if (is_multi_taskfile(&qc->tf)) {
4679 /* READ/WRITE MULTIPLE */
4680 unsigned int nsect;
4681
587005de 4682 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4683
5a5dbd18 4684 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4685 qc->dev->multi_count);
07f6f7d0
AL
4686 while (nsect--)
4687 ata_pio_sector(qc);
4688 } else
4689 ata_pio_sector(qc);
4cc980b3
AL
4690
4691 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4692}
4693
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB, then
 *	advance the HSM state according to the command's
 *	protocol (and kick off bmdma for DMA commands).
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
4729
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the command's
 *	scatterlist.  If the device requests more data than the sg
 *	provides, the excess is discarded (reads) or zero-padded
 *	(writes) so the byte count register is honored.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 */

static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* current sg entry exhausted -> advance to the next one */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;
}
4824
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Read the byte count and interrupt reason from the device's
 *	taskfile registers, validate the transfer direction, and
 *	perform the PIO transfer.  On a direction/phase mismatch the
 *	HSM is moved to HSM_ST_ERR so EH can take over.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4875
4876/**
c234fb00
AL
4877 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4878 * @ap: the target ata_port
4879 * @qc: qc on going
1da177e4 4880 *
c234fb00
AL
4881 * RETURNS:
4882 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4883 */
c234fb00
AL
4884
4885static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4886{
c234fb00
AL
4887 if (qc->tf.flags & ATA_TFLAG_POLLING)
4888 return 1;
1da177e4 4889
c234fb00
AL
4890 if (ap->hsm_task_state == HSM_ST_FIRST) {
4891 if (qc->tf.protocol == ATA_PROT_PIO &&
4892 (qc->tf.flags & ATA_TFLAG_WRITE))
4893 return 1;
1da177e4 4894
c234fb00
AL
4895 if (is_atapi_taskfile(&qc->tf) &&
4896 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4897 return 1;
fe79e683
AL
4898 }
4899
c234fb00
AL
4900 return 0;
4901}
1da177e4 4902
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  With new-style
 *	EH, an HSM violation freezes the port instead of completing
 *	the qc; without it the qc is completed unconditionally.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: no freeze support, always complete */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
4952
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the taskfile host state machine one or more steps based on
 *	@status.  States: HSM_ST_FIRST (send CDB/first data block),
 *	HSM_ST (data transfer in progress), HSM_ST_LAST (command done,
 *	check final status), HSM_ST_ERR (complete with error).
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition.  Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					/* drain the junk block the device
					 * insists on sending */
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		/* err_mask must be clear on the success path */
		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5186
65f27f38 5187static void ata_pio_task(struct work_struct *work)
8061f5f0 5188{
65f27f38
DH
5189 struct ata_port *ap =
5190 container_of(work, struct ata_port, port_task.work);
5191 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5192 u8 status;
a1af3734 5193 int poll_next;
8061f5f0 5194
7fb6ec28 5195fsm_start:
a1af3734 5196 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5197
a1af3734
AL
5198 /*
5199 * This is purely heuristic. This is a fast path.
5200 * Sometimes when we enter, BSY will be cleared in
5201 * a chk-status or two. If not, the drive is probably seeking
5202 * or something. Snooze for a couple msecs, then
5203 * chk-status again. If still busy, queue delayed work.
5204 */
5205 status = ata_busy_wait(ap, ATA_BUSY, 5);
5206 if (status & ATA_BUSY) {
5207 msleep(2);
5208 status = ata_busy_wait(ap, ATA_BUSY, 10);
5209 if (status & ATA_BUSY) {
31ce6dae 5210 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5211 return;
5212 }
8061f5f0
TH
5213 }
5214
a1af3734
AL
5215 /* move the HSM */
5216 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5217
a1af3734
AL
5218 /* another command or interrupt handler
5219 * may be running at this point.
5220 */
5221 if (poll_next)
7fb6ec28 5222 goto fsm_start;
8061f5f0
TH
5223}
5224
1da177e4
LT
5225/**
5226 * ata_qc_new - Request an available ATA command, for queueing
5227 * @ap: Port associated with device @dev
5228 * @dev: Device from whom we request an available command structure
5229 *
5230 * LOCKING:
0cba632b 5231 * None.
1da177e4
LT
5232 */
5233
5234static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5235{
5236 struct ata_queued_cmd *qc = NULL;
5237 unsigned int i;
5238
e3180499 5239 /* no command while frozen */
b51e9e5d 5240 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5241 return NULL;
5242
2ab7db1f
TH
5243 /* the last tag is reserved for internal command. */
5244 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5245 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5246 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5247 break;
5248 }
5249
5250 if (qc)
5251 qc->tag = i;
5252
5253 return qc;
5254}
5255
5256/**
5257 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5258 * @dev: Device from whom we request an available command structure
5259 *
5260 * LOCKING:
0cba632b 5261 * None.
1da177e4
LT
5262 */
5263
3373efd8 5264struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5265{
9af5c9c9 5266 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5267 struct ata_queued_cmd *qc;
5268
5269 qc = ata_qc_new(ap);
5270 if (qc) {
1da177e4
LT
5271 qc->scsicmd = NULL;
5272 qc->ap = ap;
5273 qc->dev = dev;
1da177e4 5274
2c13b7ce 5275 ata_qc_reinit(qc);
1da177e4
LT
5276 }
5277
5278 return qc;
5279}
5280
1da177e4
LT
5281/**
5282 * ata_qc_free - free unused ata_queued_cmd
5283 * @qc: Command to complete
5284 *
5285 * Designed to free unused ata_queued_cmd object
5286 * in case something prevents using it.
5287 *
5288 * LOCKING:
cca3974e 5289 * spin_lock_irqsave(host lock)
1da177e4
LT
5290 */
5291void ata_qc_free(struct ata_queued_cmd *qc)
5292{
4ba946e9
TH
5293 struct ata_port *ap = qc->ap;
5294 unsigned int tag;
5295
a4631474 5296 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5297
4ba946e9
TH
5298 qc->flags = 0;
5299 tag = qc->tag;
5300 if (likely(ata_tag_valid(tag))) {
4ba946e9 5301 qc->tag = ATA_TAG_POISON;
6cec4a39 5302 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5303 }
1da177e4
LT
5304}
5305
76014427 5306void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5307{
dedaf2b0 5308 struct ata_port *ap = qc->ap;
9af5c9c9 5309 struct ata_link *link = qc->dev->link;
dedaf2b0 5310
a4631474
TH
5311 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5312 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5313
5314 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5315 ata_sg_clean(qc);
5316
7401abf2 5317 /* command should be marked inactive atomically with qc completion */
dedaf2b0 5318 if (qc->tf.protocol == ATA_PROT_NCQ)
9af5c9c9 5319 link->sactive &= ~(1 << qc->tag);
dedaf2b0 5320 else
9af5c9c9 5321 link->active_tag = ATA_TAG_POISON;
7401abf2 5322
3f3791d3
AL
5323 /* atapi: mark qc as inactive to prevent the interrupt handler
5324 * from completing the command twice later, before the error handler
5325 * is called. (when rc != 0 and atapi request sense is needed)
5326 */
5327 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5328 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5329
1da177e4 5330 /* call completion callback */
77853bf2 5331 qc->complete_fn(qc);
1da177e4
LT
5332}
5333
39599a53
TH
5334static void fill_result_tf(struct ata_queued_cmd *qc)
5335{
5336 struct ata_port *ap = qc->ap;
5337
39599a53 5338 qc->result_tf.flags = qc->tf.flags;
4742d54f 5339 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5340}
5341
f686bcb8
TH
5342/**
5343 * ata_qc_complete - Complete an active ATA command
5344 * @qc: Command to complete
5345 * @err_mask: ATA Status register contents
5346 *
5347 * Indicate to the mid and upper layers that an ATA
5348 * command has completed, with either an ok or not-ok status.
5349 *
5350 * LOCKING:
cca3974e 5351 * spin_lock_irqsave(host lock)
f686bcb8
TH
5352 */
5353void ata_qc_complete(struct ata_queued_cmd *qc)
5354{
5355 struct ata_port *ap = qc->ap;
5356
5357 /* XXX: New EH and old EH use different mechanisms to
5358 * synchronize EH with regular execution path.
5359 *
5360 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5361 * Normal execution path is responsible for not accessing a
5362 * failed qc. libata core enforces the rule by returning NULL
5363 * from ata_qc_from_tag() for failed qcs.
5364 *
5365 * Old EH depends on ata_qc_complete() nullifying completion
5366 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5367 * not synchronize with interrupt handler. Only PIO task is
5368 * taken care of.
5369 */
5370 if (ap->ops->error_handler) {
b51e9e5d 5371 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5372
5373 if (unlikely(qc->err_mask))
5374 qc->flags |= ATA_QCFLAG_FAILED;
5375
5376 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5377 if (!ata_tag_internal(qc->tag)) {
5378 /* always fill result TF for failed qc */
39599a53 5379 fill_result_tf(qc);
f686bcb8
TH
5380 ata_qc_schedule_eh(qc);
5381 return;
5382 }
5383 }
5384
5385 /* read result TF if requested */
5386 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5387 fill_result_tf(qc);
f686bcb8
TH
5388
5389 __ata_qc_complete(qc);
5390 } else {
5391 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5392 return;
5393
5394 /* read result TF if failed or requested */
5395 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5396 fill_result_tf(qc);
f686bcb8
TH
5397
5398 __ata_qc_complete(qc);
5399 }
5400}
5401
dedaf2b0
TH
5402/**
5403 * ata_qc_complete_multiple - Complete multiple qcs successfully
5404 * @ap: port in question
5405 * @qc_active: new qc_active mask
5406 * @finish_qc: LLDD callback invoked before completing a qc
5407 *
5408 * Complete in-flight commands. This functions is meant to be
5409 * called from low-level driver's interrupt routine to complete
5410 * requests normally. ap->qc_active and @qc_active is compared
5411 * and commands are completed accordingly.
5412 *
5413 * LOCKING:
cca3974e 5414 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5415 *
5416 * RETURNS:
5417 * Number of completed commands on success, -errno otherwise.
5418 */
5419int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5420 void (*finish_qc)(struct ata_queued_cmd *))
5421{
5422 int nr_done = 0;
5423 u32 done_mask;
5424 int i;
5425
5426 done_mask = ap->qc_active ^ qc_active;
5427
5428 if (unlikely(done_mask & qc_active)) {
5429 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5430 "(%08x->%08x)\n", ap->qc_active, qc_active);
5431 return -EINVAL;
5432 }
5433
5434 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5435 struct ata_queued_cmd *qc;
5436
5437 if (!(done_mask & (1 << i)))
5438 continue;
5439
5440 if ((qc = ata_qc_from_tag(ap, i))) {
5441 if (finish_qc)
5442 finish_qc(qc);
5443 ata_qc_complete(qc);
5444 nr_done++;
5445 }
5446 }
5447
5448 return nr_done;
5449}
5450
1da177e4
LT
5451static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5452{
5453 struct ata_port *ap = qc->ap;
5454
5455 switch (qc->tf.protocol) {
3dc1d881 5456 case ATA_PROT_NCQ:
1da177e4
LT
5457 case ATA_PROT_DMA:
5458 case ATA_PROT_ATAPI_DMA:
5459 return 1;
5460
5461 case ATA_PROT_ATAPI:
5462 case ATA_PROT_PIO:
1da177e4
LT
5463 if (ap->flags & ATA_FLAG_PIO_DMA)
5464 return 1;
5465
5466 /* fall through */
5467
5468 default:
5469 return 0;
5470 }
5471
5472 /* never reached */
5473}
5474
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	On issue failure (S/G setup or qc_issue error) the qc is
 *	completed with an error mask instead of being left pending.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	/* mark the tag active on the link before touching hardware */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* map data buffers if the protocol needs DMA mapping */
	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G setup failed; report it as a system error */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5534
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command; each protocol sets hsm_task_state before
	 * either handing off to the interrupt handler or queueing
	 * ata_pio_task() for polling.
	 */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
5666
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		/* idle or error states don't expect an interrupt */
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA command failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5768
5769/**
5770 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5771 * @irq: irq line (unused)
cca3974e 5772 * @dev_instance: pointer to our ata_host information structure
1da177e4 5773 *
0cba632b
JG
5774 * Default interrupt handler for PCI IDE devices. Calls
5775 * ata_host_intr() for each port that is not disabled.
5776 *
1da177e4 5777 * LOCKING:
cca3974e 5778 * Obtains host lock during operation.
1da177e4
LT
5779 *
5780 * RETURNS:
0cba632b 5781 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5782 */
5783
7d12e780 5784irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5785{
cca3974e 5786 struct ata_host *host = dev_instance;
1da177e4
LT
5787 unsigned int i;
5788 unsigned int handled = 0;
5789 unsigned long flags;
5790
5791 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5792 spin_lock_irqsave(&host->lock, flags);
1da177e4 5793
cca3974e 5794 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5795 struct ata_port *ap;
5796
cca3974e 5797 ap = host->ports[i];
c1389503 5798 if (ap &&
029f5468 5799 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5800 struct ata_queued_cmd *qc;
5801
9af5c9c9 5802 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 5803 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5804 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5805 handled |= ata_host_intr(ap, qc);
5806 }
5807 }
5808
cca3974e 5809 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5810
5811 return IRQ_RETVAL(handled);
5812}
5813
34bf2170
TH
5814/**
5815 * sata_scr_valid - test whether SCRs are accessible
936fd732 5816 * @link: ATA link to test SCR accessibility for
34bf2170 5817 *
936fd732 5818 * Test whether SCRs are accessible for @link.
34bf2170
TH
5819 *
5820 * LOCKING:
5821 * None.
5822 *
5823 * RETURNS:
5824 * 1 if SCRs are accessible, 0 otherwise.
5825 */
936fd732 5826int sata_scr_valid(struct ata_link *link)
34bf2170 5827{
936fd732
TH
5828 struct ata_port *ap = link->ap;
5829
a16abc0b 5830 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5831}
5832
5833/**
5834 * sata_scr_read - read SCR register of the specified port
936fd732 5835 * @link: ATA link to read SCR for
34bf2170
TH
5836 * @reg: SCR to read
5837 * @val: Place to store read value
5838 *
936fd732 5839 * Read SCR register @reg of @link into *@val. This function is
34bf2170
TH
5840 * guaranteed to succeed if the cable type of the port is SATA
5841 * and the port implements ->scr_read.
5842 *
5843 * LOCKING:
5844 * None.
5845 *
5846 * RETURNS:
5847 * 0 on success, negative errno on failure.
5848 */
936fd732 5849int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5850{
936fd732
TH
5851 struct ata_port *ap = link->ap;
5852
5853 if (sata_scr_valid(link))
da3dbb17 5854 return ap->ops->scr_read(ap, reg, val);
34bf2170
TH
5855 return -EOPNOTSUPP;
5856}
5857
5858/**
5859 * sata_scr_write - write SCR register of the specified port
936fd732 5860 * @link: ATA link to write SCR for
34bf2170
TH
5861 * @reg: SCR to write
5862 * @val: value to write
5863 *
936fd732 5864 * Write @val to SCR register @reg of @link. This function is
34bf2170
TH
5865 * guaranteed to succeed if the cable type of the port is SATA
5866 * and the port implements ->scr_read.
5867 *
5868 * LOCKING:
5869 * None.
5870 *
5871 * RETURNS:
5872 * 0 on success, negative errno on failure.
5873 */
936fd732 5874int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 5875{
936fd732
TH
5876 struct ata_port *ap = link->ap;
5877
5878 if (sata_scr_valid(link))
da3dbb17 5879 return ap->ops->scr_write(ap, reg, val);
34bf2170
TH
5880 return -EOPNOTSUPP;
5881}
5882
5883/**
5884 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 5885 * @link: ATA link to write SCR for
34bf2170
TH
5886 * @reg: SCR to write
5887 * @val: value to write
5888 *
5889 * This function is identical to sata_scr_write() except that this
5890 * function performs flush after writing to the register.
5891 *
5892 * LOCKING:
5893 * None.
5894 *
5895 * RETURNS:
5896 * 0 on success, negative errno on failure.
5897 */
936fd732 5898int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 5899{
936fd732 5900 struct ata_port *ap = link->ap;
da3dbb17
TH
5901 int rc;
5902
936fd732 5903 if (sata_scr_valid(link)) {
da3dbb17
TH
5904 rc = ap->ops->scr_write(ap, reg, val);
5905 if (rc == 0)
5906 rc = ap->ops->scr_read(ap, reg, &val);
5907 return rc;
34bf2170
TH
5908 }
5909 return -EOPNOTSUPP;
5910}
5911
5912/**
936fd732
TH
5913 * ata_link_online - test whether the given link is online
5914 * @link: ATA link to test
34bf2170 5915 *
936fd732
TH
5916 * Test whether @link is online. Note that this function returns
5917 * 0 if online status of @link cannot be obtained, so
5918 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5919 *
5920 * LOCKING:
5921 * None.
5922 *
5923 * RETURNS:
5924 * 1 if the port online status is available and online.
5925 */
936fd732 5926int ata_link_online(struct ata_link *link)
34bf2170
TH
5927{
5928 u32 sstatus;
5929
936fd732
TH
5930 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5931 (sstatus & 0xf) == 0x3)
34bf2170
TH
5932 return 1;
5933 return 0;
5934}
5935
5936/**
936fd732
TH
5937 * ata_link_offline - test whether the given link is offline
5938 * @link: ATA link to test
34bf2170 5939 *
936fd732
TH
5940 * Test whether @link is offline. Note that this function
5941 * returns 0 if offline status of @link cannot be obtained, so
5942 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
5943 *
5944 * LOCKING:
5945 * None.
5946 *
5947 * RETURNS:
5948 * 1 if the port offline status is available and offline.
5949 */
936fd732 5950int ata_link_offline(struct ata_link *link)
34bf2170
TH
5951{
5952 u32 sstatus;
5953
936fd732
TH
5954 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
5955 (sstatus & 0xf) != 0x3)
34bf2170
TH
5956 return 1;
5957 return 0;
5958}
0baab86b 5959
/**
 *	ata_flush_cache - flush the device's write cache
 *	@dev: target ATA device
 *
 *	Issue FLUSH CACHE (EXT) to @dev if the device advertises a
 *	flushable write cache.  The EXT variant is chosen when the
 *	device requires 48-bit flushes (ATA_DFLAG_FLUSH_EXT).
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success (or when no flush is needed), -EIO on failure.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	/* no-op for devices without a (working) write cache */
	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
5985
#ifdef CONFIG_PM
/*
 * ata_host_request_pm - hand a PM request (suspend/resume) to EH
 * @host: host whose ports should perform the PM operation
 * @mesg: PM message to store in each port
 * @action: EH actions to request on every link
 * @ehi_flags: EH info flags to set on every link
 * @wait: if non-zero, wait for each port's EH to finish and
 *	propagate the first non-zero result
 *
 * The actual PM work is done by EH; this just records the request
 * on each port, schedules EH and optionally waits for completion.
 * Returns 0 on success, first port's -errno on failure (only
 * meaningful when @wait is set).
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			/* EH writes its result through this pointer */
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6036
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/* quiet suspend, wait for each port's EH to finish */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		/* record the new power state only if all ports suspended */
		host->dev->power.power_state = mesg;
	return rc;
}
6061
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* resume via softreset; don't wait (wait == 0), ports resume
	 * in parallel and result is not checked
	 */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
#endif /* CONFIG_PM */
500530f6 6080
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* devres-managed allocation: freed automatically on detach */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
6111
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* Wipe only the portion past ATA_DEVICE_CLEAR_OFFSET; the
	 * fields before it (link, devno, persistent flags) survive
	 * re-initialization.
	 */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);

	/* start with all transfer modes allowed; probing narrows them */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6146
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
static void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}
6179
/**
 *	sata_link_init_spd - Initialize link->sata_spd_limit
 *	@link: Link to configure sata_spd_limit for
 *
 *	Initialize @link->[hw_]sata_spd_limit to the currently
 *	configured value.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int sata_link_init_spd(struct ata_link *link)
{
	u32 scontrol, spd;
	int rc;

	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	/* SPD field (bits 7:4) of SControl; 0 means no limit */
	spd = (scontrol >> 4) & 0xf;
	if (spd)
		link->hw_sata_spd_limit &= (1 << spd) - 1;

	link->sata_spd_limit = link->hw_sata_spd_limit;

	return 0;
}
6210
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	/* INITIALIZING is cleared by ata_host_register() before probing */
	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;		/* real id assigned at registration */
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	/* deferrable: fastdrain expiry need not wake an idle CPU */
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	/* host link is always pmp 0 */
	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6270
/*
 * ata_host_release - devres release callback for an ATA host
 * @gendev: generic device the host is attached to
 * @res: devres-allocated ata_host (unused, looked up via drvdata)
 *
 * Stops all ports and the host (only if they were actually started),
 * then frees per-port data.  Two passes are used so that every port
 * is stopped before host_stop runs and before any port memory is
 * freed.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	/* pass 1: stop ports; skip slots never allocated */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		/* only stop what was started */
		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	/* pass 2: release SCSI hosts and free port memory */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6304
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	/* group so that a partial failure releases everything allocated */
	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 keeps a NULL sentinel after the last port pointer
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6369
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* once @ppi hits its NULL terminator, keep reusing
		 * the last non-NULL entry for remaining ports
		 */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first non-dummy ops become the host's default ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6419
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	/* idempotent: subsequent calls are no-ops */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		/* keep port frozen until EH probes it */
		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop only the ports started so far */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}
6473
/**
 *	ata_sas_host_init - Initialize a host struct
 *	@host: host to initialize
 *	@dev: device host is attached to
 *	@flags: host flags
 *	@ops: port_ops
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}
6494
/**
 *	ata_host_register - register initialized ATA host
 *	@host: ATA host to register
 *	@sht: template for SCSI host
 *
 *	Register initialized ATA host.  @host is allocated using
 *	ata_host_alloc() and fully initialized by LLD.  This function
 *	starts ports, registers @host with ATA and SCSI layers and
 *	probe registered devices.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.  The array is NULL-terminated, so this
	 * walks the slots past n_ports.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* associate with ACPI nodes */
	ata_acpi_associate(host);

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		sata_link_init_spd(&ap->link);

		/* print per-port info to dmesg */
		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO,
					"%cATA max %s %s\n",
					(ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->link.eh_info.desc);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			/* new-style EH: boot probing is done by EH */
			struct ata_eh_info *ehi = &ap->link.eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask =
				(1 << ata_link_max_devices(&ap->link)) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			/* old-style EH: direct synchronous bus probe */
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap, 1);
	}

	return 0;
}
6622
/**
 *	ata_host_activate - start host, request IRQ and register it
 *	@host: target ATA host
 *	@irq: IRQ to request
 *	@irq_handler: irq_handler used when requesting IRQ
 *	@irq_flags: irq_flags used when requesting IRQ
 *	@sht: scsi_host_template to use when registering the host
 *
 *	After allocating an ATA host and initializing it, most libata
 *	LLDs perform three steps to activate the host - start host,
 *	request IRQ and register it.  This helper takes necessasry
 *	arguments and performs the three steps in one go.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int i, rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	/* devm IRQ: released automatically with the device */
	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	for (i = 0; i < host->n_ports; i++)
		ata_port_desc(host->ports[i], "irq %d", irq);

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
6667
/**
 *	ata_port_detach - Detach ATA port in prepration of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without new-style EH go straight to SCSI host removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* make sure no deferred hotplug work is left pending */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
6722
0529c159
TH
6723/**
6724 * ata_host_detach - Detach all ports of an ATA host
6725 * @host: Host to detach
6726 *
6727 * Detach all ports of @host.
6728 *
6729 * LOCKING:
6730 * Kernel thread context (may sleep).
6731 */
6732void ata_host_detach(struct ata_host *host)
6733{
6734 int i;
6735
6736 for (i = 0; i < host->n_ports; i++)
6737 ata_port_detach(host->ports[i]);
6738}
6739
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	/* error/feature and status/command share registers; read vs
	 * write selects which, hence the duplicate offsets below
	 */
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
6765
0baab86b 6766
#ifdef CONFIG_PCI

/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
6787
6788/* move to PCI subsystem */
057ace5e 6789int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6790{
6791 unsigned long tmp = 0;
6792
6793 switch (bits->width) {
6794 case 1: {
6795 u8 tmp8 = 0;
6796 pci_read_config_byte(pdev, bits->reg, &tmp8);
6797 tmp = tmp8;
6798 break;
6799 }
6800 case 2: {
6801 u16 tmp16 = 0;
6802 pci_read_config_word(pdev, bits->reg, &tmp16);
6803 tmp = tmp16;
6804 break;
6805 }
6806 case 4: {
6807 u32 tmp32 = 0;
6808 pci_read_config_dword(pdev, bits->reg, &tmp32);
6809 tmp = tmp32;
6810 break;
6811 }
6812
6813 default:
6814 return -EINVAL;
6815 }
6816
6817 tmp &= bits->mask;
6818
6819 return (tmp == bits->val) ? 1 : 0;
6820}
9b847548 6821
#ifdef CONFIG_PM
/*
 * ata_pci_device_do_suspend - PCI-side half of ATA device suspend
 * @pdev: PCI device being suspended
 * @mesg: PM message
 *
 * Saves config space and disables the device; powers it down to
 * D3hot only for a real suspend event.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
6831
/*
 * ata_pci_device_do_resume - PCI-side half of ATA device resume
 * @pdev: PCI device being resumed
 *
 * Powers the device back to D0, restores config space and
 * re-enables it (devres-managed).  Returns 0 on success, -errno if
 * the device could not be re-enabled.
 */
int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}
6849
/*
 * ata_pci_device_suspend - suspend an ATA host on a PCI device
 * @pdev: PCI device to suspend
 * @mesg: PM message
 *
 * Suspends the ATA host first (via EH); only on success is the PCI
 * side of the device suspended.  Returns 0 on success, -errno
 * otherwise.
 */
int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}
6863
/*
 * ata_pci_device_resume - resume an ATA host on a PCI device
 * @pdev: PCI device to resume
 *
 * Resumes the PCI side first; the ATA host is resumed only if that
 * succeeded.  Returns 0 on success, -errno otherwise.
 */
int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */
6875
1da177e4
LT
6876#endif /* CONFIG_PCI */
6877
6878
/*
 * ata_init - libata module init
 *
 * Converts the probe timeout from seconds to jiffies and creates the
 * two libata workqueues ("ata" for normal work, "ata_aux" as a
 * single-threaded auxiliary queue).  Returns 0 on success, -ENOMEM
 * if either workqueue cannot be created.
 */
static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}
6895
6896static void __exit ata_exit(void)
6897{
6898 destroy_workqueue(ata_wq);
453b07ac 6899 destroy_workqueue(ata_aux_wq);
1da177e4
LT
6900}
6901
/* init early (subsys level) so LLDs linked in can register at device time */
subsys_initcall(ata_init);
module_exit(ata_exit);
6904
/* next jiffy at which ata_ratelimit() may return 1 again */
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

/*
 * ata_ratelimit - global rate limiter for noisy message paths
 *
 * Returns 1 at most once per HZ/5 jiffies (5 times a second),
 * 0 otherwise.  Safe to call from any context; state is protected
 * by ata_ratelimit_lock.
 */
int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
6925
/**
 *	ata_wait_register - wait until register value changes
 *	@reg: IO-mapped register
 *	@mask: Mask to apply to read register value
 *	@val: Wait condition
 *	@interval_msec: polling interval in milliseconds
 *	@timeout_msec: timeout in milliseconds
 *
 *	Waiting for some bits of register to change is a common
 *	operation for ATA controllers.  This function reads 32bit LE
 *	IO-mapped register @reg and tests for the following condition.
 *
 *	(*@reg & mask) != val
 *
 *	If the condition is met, it returns; otherwise, the process is
 *	repeated after @interval_msec until timeout.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
6971
/*
 * Dummy port_ops - used for ports an LLD wants present but inert
 * (e.g. unwired channels).  Status always reads ready; any issued
 * command fails with AC_ERR_SYSTEM.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* always report device ready so nothing waits on a dummy port */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* reject every command issued to a dummy port */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* port_info wrapper around the dummy ops, for LLD port tables */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7007
1da177e4
LT
7008/*
7009 * libata is essentially a library of internal helper functions for
7010 * low-level ATA host controller drivers. As such, the API/ABI is
7011 * likely to change as new drivers are added and updated.
7012 * Do not depend on ABI/API stability.
7013 */
7014
/* Exported (GPL-only) entry points of the libata core library.
 * Order mirrors the rough grouping of the helpers above: debounce timings,
 * dummy port objects, host lifecycle, command submission, taskfile access,
 * reset/EH, SCSI glue, SCR access, PM hooks, timing computation, PCI glue,
 * and cable-type helpers. */
EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);