/*
 *  libata-core.c - helper library for ATA
 *
 *  Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                  Please ALWAYS copy linux-ide@vger.kernel.org
 *                  on emails.
 *
 *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
 *  Copyright 2003-2004 Jeff Garzik
 *
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; see the file COPYING.  If not, write to
 *  the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 *  libata documentation is available via 'make {ps|pdf}docs',
 *  as Documentation/DocBook/libata.*
 *
 *  Hardware documentation available from http://www.t13.org/ and
 *  http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/scatterlist.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

#define DRV_VERSION     "2.20"  /* must be exactly four chars */


/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[]  = {   5,  100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = {  25,  500, 2000 };
const unsigned long sata_deb_timing_long[]    = { 100, 2000, 5000 };

static unsigned int ata_dev_init_params(struct ata_device *dev,
                                        u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static void ata_dev_xfermask(struct ata_device *dev);

unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa = 0;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 1;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");

int ata_spindown_compat = 1;
module_param_named(spindown_compat, ata_spindown_compat, int, 0644);
MODULE_PARM_DESC(spindown_compat, "Enable backward compatible spindown "
                 "behavior.  Will be removed.  More info can be found in "
                 "Documentation/feature-removal-schedule.txt\n");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);


/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(const struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
        fis[0] = 0x27;  /* Register - Host to Device FIS */
        fis[1] = (pmp & 0xf) | (1 << 7);        /* Port multiplier number,
                                                   bit 7 indicates Command FIS */
        fis[2] = tf->command;
        fis[3] = tf->feature;

        fis[4] = tf->lbal;
        fis[5] = tf->lbam;
        fis[6] = tf->lbah;
        fis[7] = tf->device;

        fis[8] = tf->hob_lbal;
        fis[9] = tf->hob_lbam;
        fis[10] = tf->hob_lbah;
        fis[11] = tf->hob_feature;

        fis[12] = tf->nsect;
        fis[13] = tf->hob_nsect;
        fis[14] = 0;
        fis[15] = tf->ctl;

        fis[16] = 0;
        fis[17] = 0;
        fis[18] = 0;
        fis[19] = 0;
}

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a serial ATA FIS structure to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
{
        tf->command     = fis[2];       /* status */
        tf->feature     = fis[3];       /* error */

        tf->lbal        = fis[4];
        tf->lbam        = fis[5];
        tf->lbah        = fis[6];
        tf->device      = fis[7];

        tf->hob_lbal    = fis[8];
        tf->hob_lbam    = fis[9];
        tf->hob_lbah    = fis[10];

        tf->nsect       = fis[12];
        tf->hob_nsect   = fis[13];
}

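/*
 * Illustrative sketch (editor's example, not part of the original
 * driver): for the fields both routines carry, ata_tf_from_fis()
 * undoes ata_tf_to_fis().  Byte 2 carries the command on the way out
 * and is read back as the status field when parsing a D2H FIS, which
 * is why the parser labels it "status".  Compiled out; for
 * illustration only.
 */
#if 0
static void example_fis_round_trip(void)
{
        struct ata_taskfile tf, out;
        u8 fis[20];

        memset(&tf, 0, sizeof(tf));
        tf.command = ATA_CMD_READ_EXT;
        tf.lbal = 0x10;                 /* LBA bits 0-7 */
        tf.lbam = 0x20;                 /* LBA bits 8-15 */
        tf.lbah = 0x30;                 /* LBA bits 16-23 */
        tf.nsect = 8;                   /* sector count */

        ata_tf_to_fis(&tf, fis, 0);     /* PMP port 0 */
        ata_tf_from_fis(fis, &out);     /* out.lbal == 0x10, etc. */
}
#endif
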
static const u8 ata_rw_cmds[] = {
        /* pio multi */
        ATA_CMD_READ_MULTI,
        ATA_CMD_WRITE_MULTI,
        ATA_CMD_READ_MULTI_EXT,
        ATA_CMD_WRITE_MULTI_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_MULTI_FUA_EXT,
        /* pio */
        ATA_CMD_PIO_READ,
        ATA_CMD_PIO_WRITE,
        ATA_CMD_PIO_READ_EXT,
        ATA_CMD_PIO_WRITE_EXT,
        0,
        0,
        0,
        0,
        /* dma */
        ATA_CMD_READ,
        ATA_CMD_WRITE,
        ATA_CMD_READ_EXT,
        ATA_CMD_WRITE_EXT,
        0,
        0,
        0,
        ATA_CMD_WRITE_FUA_EXT
};

/**
 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
 * @tf: command to examine and configure
 * @dev: device tf belongs to
 *
 * Examine the device configuration and tf->flags to calculate
 * the proper read/write commands and protocol to use.
 *
 * LOCKING:
 * caller.
 */
static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
{
        u8 cmd;

        int index, fua, lba48, write;

        fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
        lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
        write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;

        if (dev->flags & ATA_DFLAG_PIO) {
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else if (lba48 && (dev->ap->flags & ATA_FLAG_PIO_LBA48)) {
                /* Unable to use DMA due to host limitation */
                tf->protocol = ATA_PROT_PIO;
                index = dev->multi_count ? 0 : 8;
        } else {
                tf->protocol = ATA_PROT_DMA;
                index = 16;
        }

        cmd = ata_rw_cmds[index + fua + lba48 + write];
        if (cmd) {
                tf->command = cmd;
                return 0;
        }
        return -1;
}

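/*
 * Illustrative sketch (editor's example, not in the original source):
 * the lookup above forms a 3-bit index within each 8-entry group of
 * ata_rw_cmds[] -- fua contributes 4, lba48 contributes 2, write
 * contributes 1 -- plus a group base of 0 (pio multi), 8 (pio) or
 * 16 (dma).  A DMA LBA48 write is therefore entry 16 + 0 + 2 + 1 = 19,
 * i.e. ATA_CMD_WRITE_EXT.  Compiled out; for illustration only.
 */
#if 0
static u8 example_rw_cmd_index(void)
{
        int index = 16;                 /* dma group */
        int fua = 0, lba48 = 2, write = 1;

        return ata_rw_cmds[index + fua + lba48 + write]; /* ATA_CMD_WRITE_EXT */
}
#endif
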
/**
 * ata_tf_read_block - Read block address from ATA taskfile
 * @tf: ATA taskfile of interest
 * @dev: ATA device @tf belongs to
 *
 * LOCKING:
 * None.
 *
 * Read block address from @tf.  This function can handle all
 * three address formats - LBA, LBA48 and CHS.  tf->protocol and
 * flags select the address format to use.
 *
 * RETURNS:
 * Block address read from @tf.
 */
u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
{
        u64 block = 0;

        if (tf->flags & ATA_TFLAG_LBA) {
                if (tf->flags & ATA_TFLAG_LBA48) {
                        block |= (u64)tf->hob_lbah << 40;
                        block |= (u64)tf->hob_lbam << 32;
                        block |= tf->hob_lbal << 24;
                } else
                        block |= (tf->device & 0xf) << 24;

                block |= tf->lbah << 16;
                block |= tf->lbam << 8;
                block |= tf->lbal;
        } else {
                u32 cyl, head, sect;

                cyl = tf->lbam | (tf->lbah << 8);
                head = tf->device & 0xf;
                sect = tf->lbal;

                block = (cyl * dev->heads + head) * dev->sectors + sect;
        }

        return block;
}

/**
 * ata_build_rw_tf - Build ATA taskfile for given read/write request
 * @tf: Target ATA taskfile
 * @dev: ATA device @tf belongs to
 * @block: Block address
 * @n_block: Number of blocks
 * @tf_flags: RW/FUA etc...
 * @tag: tag
 *
 * LOCKING:
 * None.
 *
 * Build ATA taskfile @tf for read/write request described by
 * @block, @n_block, @tf_flags and @tag on @dev.
 *
 * RETURNS:
 *
 * 0 on success, -ERANGE if the request is too large for @dev,
 * -EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
                    u64 block, u32 n_block, unsigned int tf_flags,
                    unsigned int tag)
{
        tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
        tf->flags |= tf_flags;

        if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
                /* yay, NCQ */
                if (!lba_48_ok(block, n_block))
                        return -ERANGE;

                tf->protocol = ATA_PROT_NCQ;
                tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

                if (tf->flags & ATA_TFLAG_WRITE)
                        tf->command = ATA_CMD_FPDMA_WRITE;
                else
                        tf->command = ATA_CMD_FPDMA_READ;

                tf->nsect = tag << 3;
                tf->hob_feature = (n_block >> 8) & 0xff;
                tf->feature = n_block & 0xff;

                tf->hob_lbah = (block >> 40) & 0xff;
                tf->hob_lbam = (block >> 32) & 0xff;
                tf->hob_lbal = (block >> 24) & 0xff;
                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device = 1 << 6;
                if (tf->flags & ATA_TFLAG_FUA)
                        tf->device |= 1 << 7;
        } else if (dev->flags & ATA_DFLAG_LBA) {
                tf->flags |= ATA_TFLAG_LBA;

                if (lba_28_ok(block, n_block)) {
                        /* use LBA28 */
                        tf->device |= (block >> 24) & 0xf;
                } else if (lba_48_ok(block, n_block)) {
                        if (!(dev->flags & ATA_DFLAG_LBA48))
                                return -ERANGE;

                        /* use LBA48 */
                        tf->flags |= ATA_TFLAG_LBA48;

                        tf->hob_nsect = (n_block >> 8) & 0xff;

                        tf->hob_lbah = (block >> 40) & 0xff;
                        tf->hob_lbam = (block >> 32) & 0xff;
                        tf->hob_lbal = (block >> 24) & 0xff;
                } else
                        /* request too large even for LBA48 */
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                tf->nsect = n_block & 0xff;

                tf->lbah = (block >> 16) & 0xff;
                tf->lbam = (block >> 8) & 0xff;
                tf->lbal = block & 0xff;

                tf->device |= ATA_LBA;
        } else {
                /* CHS */
                u32 sect, head, cyl, track;

                /* The request -may- be too large for CHS addressing. */
                if (!lba_28_ok(block, n_block))
                        return -ERANGE;

                if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
                        return -EINVAL;

                /* Convert LBA to CHS */
                track = (u32)block / dev->sectors;
                cyl   = track / dev->heads;
                head  = track % dev->heads;
                sect  = (u32)block % dev->sectors + 1;

                DPRINTK("block %u track %u cyl %u head %u sect %u\n",
                        (u32)block, track, cyl, head, sect);

                /* Check whether the converted CHS can fit.
                   Cylinder: 0-65535
                   Head: 0-15
                   Sector: 1-255*/
                if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
                        return -ERANGE;

                tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
                tf->lbal = sect;
                tf->lbam = cyl;
                tf->lbah = cyl >> 8;
                tf->device |= head;
        }

        return 0;
}

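/*
 * Illustrative sketch (editor's example, not in the original source):
 * the CHS branch above maps a linear block number onto the classic
 * cylinder/head/sector triple.  With dev->heads = 16 and
 * dev->sectors = 63, block 4000 gives track = 4000 / 63 = 63,
 * cyl = 63 / 16 = 3, head = 63 % 16 = 15, sect = 4000 % 63 + 1 = 32.
 * Compiled out; for illustration only.
 */
#if 0
static void example_lba_to_chs(void)
{
        u32 block = 4000, heads = 16, sectors = 63;
        u32 track = block / sectors;            /* 63 */
        u32 cyl   = track / heads;              /* 3 */
        u32 head  = track % heads;              /* 15 */
        u32 sect  = block % sectors + 1;        /* 32 */

        (void)cyl; (void)head; (void)sect;
}
#endif
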
/**
 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
 * @pio_mask: pio_mask
 * @mwdma_mask: mwdma_mask
 * @udma_mask: udma_mask
 *
 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
 * unsigned int xfer_mask.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Packed xfer_mask.
 */
static unsigned int ata_pack_xfermask(unsigned int pio_mask,
                                      unsigned int mwdma_mask,
                                      unsigned int udma_mask)
{
        return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
                ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
                ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
}

/**
 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
 * @xfer_mask: xfer_mask to unpack
 * @pio_mask: resulting pio_mask
 * @mwdma_mask: resulting mwdma_mask
 * @udma_mask: resulting udma_mask
 *
 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
 * Any NULL destination masks will be ignored.
 */
static void ata_unpack_xfermask(unsigned int xfer_mask,
                                unsigned int *pio_mask,
                                unsigned int *mwdma_mask,
                                unsigned int *udma_mask)
{
        if (pio_mask)
                *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
        if (mwdma_mask)
                *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
        if (udma_mask)
                *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
}

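/*
 * Illustrative sketch (editor's example, not in the original source):
 * pack/unpack are exact inverses as long as each per-type mask fits
 * in its bit field.  PIO modes 0-4 (0x1f), MWDMA 0-2 (0x07) and
 * UDMA 0-5 (0x3f) pack into one word and unpack unchanged.
 * Compiled out; for illustration only.
 */
#if 0
static void example_xfermask_round_trip(void)
{
        unsigned int xfer_mask, pio, mwdma, udma;

        xfer_mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);
        ata_unpack_xfermask(xfer_mask, &pio, &mwdma, &udma);
        /* pio == 0x1f, mwdma == 0x07, udma == 0x3f */
}
#endif
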
static const struct ata_xfer_ent {
        int shift, bits;
        u8 base;
} ata_xfer_tbl[] = {
        { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
        { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
        { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
        { -1, },
};

/**
 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
 * @xfer_mask: xfer_mask of interest
 *
 * Return matching XFER_* value for @xfer_mask.  Only the highest
 * bit of @xfer_mask is considered.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching XFER_* value, 0 if no match found.
 */
static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
{
        int highbit = fls(xfer_mask) - 1;
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
                        return ent->base + highbit - ent->shift;
        return 0;
}

/**
 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_mask for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_mask, 0 if no match found.
 */
static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return 1 << (ent->shift + xfer_mode - ent->base);
        return 0;
}

/**
 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
 * @xfer_mode: XFER_* of interest
 *
 * Return matching xfer_shift for @xfer_mode.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Matching xfer_shift, -1 if no match found.
 */
static int ata_xfer_mode2shift(unsigned int xfer_mode)
{
        const struct ata_xfer_ent *ent;

        for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
                if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
                        return ent->shift;
        return -1;
}

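/*
 * Illustrative sketch (editor's example, not in the original source):
 * the three helpers above translate between a packed capability mask
 * and the flat XFER_* mode numbers via ata_xfer_tbl.  A mask whose
 * highest set bit is UDMA mode 5 maps to XFER_UDMA_5, and XFER_UDMA_5
 * maps back to exactly that single bit.  Compiled out.
 */
#if 0
static void example_xfer_translation(void)
{
        unsigned int mask = 0x3f << ATA_SHIFT_UDMA;     /* UDMA 0-5 */
        u8 mode = ata_xfer_mask2mode(mask);             /* XFER_UDMA_5 */

        /* mode2mask() returns only the bit for that one mode */
        WARN_ON(ata_xfer_mode2mask(mode) != 1U << (ATA_SHIFT_UDMA + 5));
        WARN_ON(ata_xfer_mode2shift(mode) != ATA_SHIFT_UDMA);
}
#endif
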
/**
 * ata_mode_string - convert xfer_mask to string
 * @xfer_mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @xfer_mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
        static const char * const xfer_mode_str[] = {
                "PIO0",
                "PIO1",
                "PIO2",
                "PIO3",
                "PIO4",
                "PIO5",
                "PIO6",
                "MWDMA0",
                "MWDMA1",
                "MWDMA2",
                "MWDMA3",
                "MWDMA4",
                "UDMA/16",
                "UDMA/25",
                "UDMA/33",
                "UDMA/44",
                "UDMA/66",
                "UDMA/100",
                "UDMA/133",
                "UDMA7",
        };
        int highbit;

        highbit = fls(xfer_mask) - 1;
        if (highbit >= 0 && highbit < ARRAY_SIZE(xfer_mode_str))
                return xfer_mode_str[highbit];
        return "<n/a>";
}

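/*
 * Illustrative sketch (editor's example, not in the original source):
 * the string table is indexed directly by the highest set bit of the
 * packed mask, so the PIO/MWDMA/UDMA groups must stay contiguous and
 * in shift order.  A mask topping out at UDMA mode 5 prints as
 * "UDMA/100".  Compiled out; for illustration only.
 */
#if 0
static void example_mode_string(void)
{
        unsigned int mask = ata_pack_xfermask(0x1f, 0x07, 0x3f);

        printk(KERN_DEBUG "max mode: %s\n", ata_mode_string(mask));
        /* prints "max mode: UDMA/100" */
}
#endif
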
static const char *sata_spd_string(unsigned int spd)
{
        static const char * const spd_str[] = {
                "1.5 Gbps",
                "3.0 Gbps",
        };

        if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
                return "<unknown>";
        return spd_str[spd - 1];
}

void ata_dev_disable(struct ata_device *dev)
{
        if (ata_dev_enabled(dev) && ata_msg_drv(dev->ap)) {
                ata_dev_printk(dev, KERN_WARNING, "disabled\n");
                ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
                                        ATA_DNXFER_QUIET);
                dev->class++;
        }
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        u8 nsect, lbal;

        ap->ops->dev_select(ap, device);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        iowrite8(0xaa, ioaddr->nsect_addr);
        iowrite8(0x55, ioaddr->lbal_addr);

        iowrite8(0x55, ioaddr->nsect_addr);
        iowrite8(0xaa, ioaddr->lbal_addr);

        nsect = ioread8(ioaddr->nsect_addr);
        lbal = ioread8(ioaddr->lbal_addr);

        if ((nsect == 0x55) && (lbal == 0xaa))
                return 1;       /* we found a device */

        return 0;               /* nothing found */
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(const struct ata_taskfile *tf)
{
        /* Apple's open source Darwin code hints that some devices only
         * put a proper signature into the LBA mid/high registers,
         * So, we only check those.  It's sufficient for uniqueness.
         */

        if (((tf->lbam == 0) && (tf->lbah == 0)) ||
            ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
                DPRINTK("found ATA device by sig\n");
                return ATA_DEV_ATA;
        }

        if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
            ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
                DPRINTK("found ATAPI device by sig\n");
                return ATA_DEV_ATAPI;
        }

        DPRINTK("unknown device\n");
        return ATA_DEV_UNKNOWN;
}

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 * @r_err: Value of error register on completion
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 *
 * RETURNS:
 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */

unsigned int
ata_dev_try_classify(struct ata_port *ap, unsigned int device, u8 *r_err)
{
        struct ata_taskfile tf;
        unsigned int class;
        u8 err;

        ap->ops->dev_select(ap, device);

        memset(&tf, 0, sizeof(tf));

        ap->ops->tf_read(ap, &tf);
        err = tf.feature;
        if (r_err)
                *r_err = err;

        /* see if device passed diags: if master then continue and warn later */
        if (err == 0 && device == 0)
                /* diagnostic fail : do nothing _YET_ */
                ap->device[device].horkage |= ATA_HORKAGE_DIAGNOSTIC;
        else if (err == 1)
                /* do nothing */ ;
        else if ((device == 0) && (err == 0x81))
                /* do nothing */ ;
        else
                return ATA_DEV_NONE;

        /* determine if device is ATA or ATAPI */
        class = ata_dev_classify(&tf);

        if (class == ATA_DEV_UNKNOWN)
                return ATA_DEV_NONE;
        if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
                return ATA_DEV_NONE;
        return class;
}

/**
 * ata_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_id_string(const u16 *id, unsigned char *s,
                   unsigned int ofs, unsigned int len)
{
        unsigned int c;

        while (len > 0) {
                c = id[ofs] >> 8;
                *s = c;
                s++;

                c = id[ofs] & 0xff;
                *s = c;
                s++;

                ofs++;
                len -= 2;
        }
}

/**
 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. must be an odd number.
 *
 * This function is identical to ata_id_string except that it
 * trims trailing spaces and terminates the resulting string with
 * null.  @len must be actual maximum length (even number) + 1.
 *
 * LOCKING:
 * caller.
 */
void ata_id_c_string(const u16 *id, unsigned char *s,
                     unsigned int ofs, unsigned int len)
{
        unsigned char *p;

        WARN_ON(!(len & 1));

        ata_id_string(id, s, ofs, len - 1);

        p = s + strnlen(s, len - 1);
        while (p > s && p[-1] == ' ')
                p--;
        *p = '\0';
}

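/*
 * Illustrative sketch (editor's example, not in the original source):
 * IDENTIFY strings are stored big-endian within each 16-bit word, so
 * the word 0x4142 yields the two characters "AB".  ata_id_c_string()
 * additionally trims trailing spaces and NUL-terminates, which is how
 * the model and firmware strings are extracted elsewhere in this
 * file.  Compiled out; for illustration only.
 */
#if 0
static void example_id_string(void)
{
        u16 id[2] = { 0x4142, 0x2020 };         /* "AB  " */
        unsigned char buf[5];

        ata_id_c_string(id, buf, 0, sizeof(buf));
        /* buf now holds "AB" - padding spaces trimmed, NUL added */
}
#endif
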
static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
        sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
        sectors |= (tf->hob_lbal & 0xff) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return ++sectors;
}

static u64 ata_tf_to_lba(struct ata_taskfile *tf)
{
        u64 sectors = 0;

        sectors |= (tf->device & 0x0f) << 24;
        sectors |= (tf->lbah & 0xff) << 16;
        sectors |= (tf->lbam & 0xff) << 8;
        sectors |= (tf->lbal & 0xff);

        return ++sectors;
}

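/*
 * Illustrative sketch (editor's example, not in the original source):
 * READ NATIVE MAX ADDRESS returns the highest addressable LBA, so the
 * helpers above add one to convert an address into a sector count.
 * For LBA28 the top four address bits live in the low nibble of the
 * device register.  Compiled out; for illustration only.
 */
#if 0
static void example_tf_to_lba(void)
{
        struct ata_taskfile tf = { };

        /* highest addressable LBA = 0x01ffffff */
        tf.device = 0x40 | 0x1;         /* LBA bit + address bits 24-27 */
        tf.lbah = 0xff;
        tf.lbam = 0xff;
        tf.lbal = 0xff;

        /* (0x1 << 24 | 0xffffff) + 1 == 0x2000000 sectors */
        WARN_ON(ata_tf_to_lba(&tf) != 0x2000000);
}
#endif
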
/**
 * ata_read_native_max_address_ext - LBA48 native max query
 * @dev: Device to query
 *
 * Perform an LBA48 size query upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */

static u64 ata_read_native_max_address_ext(struct ata_device *dev)
{
        unsigned int err;
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= 0x40;

        err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
        if (err)
                return 0;

        return ata_tf_to_lba48(&tf);
}

/**
 * ata_read_native_max_address - LBA28 native max query
 * @dev: Device to query
 *
 * Perform an LBA28 size query upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */

static u64 ata_read_native_max_address(struct ata_device *dev)
{
        unsigned int err;
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = ATA_CMD_READ_NATIVE_MAX;
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= 0x40;

        err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
        if (err)
                return 0;

        return ata_tf_to_lba(&tf);
}

/**
 * ata_set_native_max_address_ext - LBA48 native max set
 * @dev: Device to query
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA48 size set max upon the device in question. Return the
 * actual LBA48 size or zero if the command fails.
 */

static u64 ata_set_native_max_address_ext(struct ata_device *dev, u64 new_sectors)
{
        unsigned int err;
        struct ata_taskfile tf;

        new_sectors--;

        ata_tf_init(dev, &tf);

        tf.command = ATA_CMD_SET_MAX_EXT;
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_LBA48 | ATA_TFLAG_ISADDR;
        tf.protocol |= ATA_PROT_NODATA;
        tf.device |= 0x40;

        tf.lbal = (new_sectors >> 0) & 0xff;
        tf.lbam = (new_sectors >> 8) & 0xff;
        tf.lbah = (new_sectors >> 16) & 0xff;

        tf.hob_lbal = (new_sectors >> 24) & 0xff;
        tf.hob_lbam = (new_sectors >> 32) & 0xff;
        tf.hob_lbah = (new_sectors >> 40) & 0xff;

        err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
        if (err)
                return 0;

        return ata_tf_to_lba48(&tf);
}

/**
 * ata_set_native_max_address - LBA28 native max set
 * @dev: Device to query
 * @new_sectors: new max sectors value to set for the device
 *
 * Perform an LBA28 size set max upon the device in question. Return the
 * actual LBA28 size or zero if the command fails.
 */

static u64 ata_set_native_max_address(struct ata_device *dev, u64 new_sectors)
{
        unsigned int err;
        struct ata_taskfile tf;

        new_sectors--;

        ata_tf_init(dev, &tf);

        tf.command = ATA_CMD_SET_MAX;
        tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
        tf.protocol |= ATA_PROT_NODATA;

        tf.lbal = (new_sectors >> 0) & 0xff;
        tf.lbam = (new_sectors >> 8) & 0xff;
        tf.lbah = (new_sectors >> 16) & 0xff;
        tf.device |= ((new_sectors >> 24) & 0x0f) | 0x40;

        err = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
        if (err)
                return 0;

        return ata_tf_to_lba(&tf);
}

/**
 * ata_hpa_resize - Resize a device with an HPA set
 * @dev: Device to resize
 *
 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
 * it if required to the full size of the media. The caller must check
 * the drive has the HPA feature set enabled.
 */

static u64 ata_hpa_resize(struct ata_device *dev)
{
        u64 sectors = dev->n_sectors;
        u64 hpa_sectors;

        if (ata_id_has_lba48(dev->id))
                hpa_sectors = ata_read_native_max_address_ext(dev);
        else
                hpa_sectors = ata_read_native_max_address(dev);

        /* if no hpa, both should be equal */
        ata_dev_printk(dev, KERN_INFO, "%s 1: sectors = %lld, "
                       "hpa_sectors = %lld\n",
                       __FUNCTION__, (long long)sectors, (long long)hpa_sectors);

        if (hpa_sectors > sectors) {
                ata_dev_printk(dev, KERN_INFO,
                        "Host Protected Area detected:\n"
                        "\tcurrent size: %lld sectors\n"
                        "\tnative size: %lld sectors\n",
                        (long long)sectors, (long long)hpa_sectors);

                if (ata_ignore_hpa) {
                        if (ata_id_has_lba48(dev->id))
                                hpa_sectors = ata_set_native_max_address_ext(dev, hpa_sectors);
                        else
                                hpa_sectors = ata_set_native_max_address(dev,
                                                                hpa_sectors);

                        if (hpa_sectors) {
                                ata_dev_printk(dev, KERN_INFO, "native size "
                                        "increased to %lld sectors\n",
                                        (long long)hpa_sectors);
                                return hpa_sectors;
                        }
                }
        }
        return sectors;
}

static u64 ata_id_n_sectors(const u16 *id)
{
        if (ata_id_has_lba(id)) {
                if (ata_id_has_lba48(id))
                        return ata_id_u64(id, 100);
                else
                        return ata_id_u32(id, 60);
        } else {
                if (ata_id_current_chs_valid(id))
                        return ata_id_u32(id, 57);
                else
                        return id[1] * id[3] * id[6];
        }
}

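/*
 * Illustrative sketch (editor's example, not in the original source):
 * capacity comes from different IDENTIFY words depending on the
 * addressing scheme - words 100-103 (LBA48), 60-61 (LBA28), 57-58
 * (current CHS) or the default CHS geometry words 1, 3 and 6.  For a
 * 16383/16/63 default geometry that last product is
 * 16383 * 16 * 63 = 16514064 sectors.  Compiled out.
 */
#if 0
static void example_id_n_sectors_chs(void)
{
        u64 sectors = 16383ULL * 16 * 63;       /* words 1 * 3 * 6 */

        WARN_ON(sectors != 16514064ULL);
}
#endif
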
/**
 * ata_id_to_dma_mode - Identify DMA mode from id block
 * @dev: device to identify
 * @unknown: mode to assume if we cannot tell
 *
 * Set up the timing values for the device based upon the identify
 * reported values for the DMA mode. This function is used by drivers
 * which rely upon firmware configured modes, but wish to report the
 * mode correctly when possible.
 *
 * In addition we emit similarly formatted messages to the default
 * ata_dev_set_mode handler, in order to provide consistency of
 * presentation.
 */

void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
{
        unsigned int mask;
        u8 mode;

        /* Pack the DMA modes */
        mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
        if (dev->id[53] & 0x04)
                mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;

        /* Select the mode in use */
        mode = ata_xfer_mask2mode(mask);

        if (mode != 0) {
                ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
                               ata_mode_string(mask));
        } else {
                /* SWDMA perhaps ? */
                mode = unknown;
                ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
        }

        /* Configure the device reporting */
        dev->xfer_mode = mode;
        dev->xfer_shift = ata_xfer_mode2shift(mode);
}

/**
 * ata_noop_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * This function performs no actual function.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
}


/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.  Works with both PIO and MMIO.
 *
 * May be used as the dev_select() entry in ata_port_operations.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select (struct ata_port *ap, unsigned int device)
{
        u8 tmp;

        if (device == 0)
                tmp = ATA_DEVICE_OBS;
        else
                tmp = ATA_DEVICE_OBS | ATA_DEV1;

        iowrite8(tmp, ap->ioaddr.device_addr);
        ata_pause(ap);          /* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
                    unsigned int wait, unsigned int can_sleep)
{
        if (ata_msg_probe(ap))
                ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
                                "device %u, wait %u\n", device, wait);

        if (wait)
                ata_wait_idle(ap);

        ap->ops->dev_select(ap, device);

        if (wait) {
                if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
                        msleep(150);
                ata_wait_idle(ap);
        }
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @id: IDENTIFY DEVICE page to dump
 *
 * Dump selected 16-bit words from the given IDENTIFY DEVICE
 * page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(const u16 *id)
{
        DPRINTK("49==0x%04x  "
                "53==0x%04x  "
                "63==0x%04x  "
                "64==0x%04x  "
                "75==0x%04x  \n",
                id[49],
                id[53],
                id[63],
                id[64],
                id[75]);
        DPRINTK("80==0x%04x  "
                "81==0x%04x  "
                "82==0x%04x  "
                "83==0x%04x  "
                "84==0x%04x  \n",
                id[80],
                id[81],
                id[82],
                id[83],
                id[84]);
        DPRINTK("88==0x%04x  "
                "93==0x%04x\n",
                id[88],
                id[93]);
}

/**
 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 * @id: IDENTIFY data to compute xfer mask from
 *
 * Compute the xfermask for this device. This is not as trivial
 * as it seems if we must consider early devices correctly.
 *
 * FIXME: pre IDE drive timing (do we care ?).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Computed xfermask
 */
static unsigned int ata_id_xfermask(const u16 *id)
{
        unsigned int pio_mask, mwdma_mask, udma_mask;

        /* Usual case. Word 53 indicates word 64 is valid */
        if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
                pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
                pio_mask <<= 3;
                pio_mask |= 0x7;
        } else {
                /* If word 64 isn't valid then Word 51 high byte holds
                 * the PIO timing number for the maximum. Turn it into
                 * a mask.
                 */
                u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
                if (mode < 5)   /* Valid PIO range */
                        pio_mask = (2 << mode) - 1;
                else
                        pio_mask = 1;

                /* But wait.. there's more. Design your standards by
                 * committee and you too can get a free iordy field to
                 * process. However it's the speeds not the modes that
                 * are supported... Note drivers using the timing API
                 * will get this right anyway
                 */
        }

        mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

        if (ata_id_is_cfa(id)) {
                /*
                 * Process compact flash extended modes
                 */
                int pio = id[163] & 0x7;
                int dma = (id[163] >> 3) & 7;

                if (pio)
                        pio_mask |= (1 << 5);
                if (pio > 1)
                        pio_mask |= (1 << 6);
                if (dma)
                        mwdma_mask |= (1 << 3);
                if (dma > 1)
                        mwdma_mask |= (1 << 4);
        }

        udma_mask = 0;
        if (id[ATA_ID_FIELD_VALID] & (1 << 2))
                udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

        return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}

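/*
 * Illustrative sketch (editor's example, not in the original source):
 * in the usual case word 64 advertises modes above PIO2 as a bit
 * field, so a device reporting bits 0-1 set supports PIO3 and PIO4;
 * shifting by 3 and OR-ing in 0x7 folds in the always-supported modes
 * 0-2, giving pio_mask = 0x1f.  Compiled out; for illustration only.
 */
#if 0
static void example_pio_mask(void)
{
        u16 id_pio_modes = 0x0003;      /* word 64: PIO3 + PIO4 */
        unsigned int pio_mask = ((id_pio_modes & 0x03) << 3) | 0x7;

        WARN_ON(pio_mask != 0x1f);
}
#endif
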
/**
 * ata_port_queue_task - Queue port_task
 * @ap: The ata_port to queue port_task for
 * @fn: workqueue function to be scheduled
 * @data: data for @fn to use
 * @delay: delay time for workqueue function
 *
 * Schedule @fn(@data) for execution after @delay jiffies using
 * port_task.  There is one port_task per port and it's the
 * user(low level driver)'s responsibility to make sure that only
 * one task is active at any given time.
 *
 * libata core layer takes care of synchronization between
 * port_task and EH.  ata_port_queue_task() may be ignored for EH
 * synchronization.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
                         unsigned long delay)
{
        int rc;

        if (ap->pflags & ATA_PFLAG_FLUSH_PORT_TASK)
                return;

        PREPARE_DELAYED_WORK(&ap->port_task, fn);
        ap->port_task_data = data;

        rc = queue_delayed_work(ata_wq, &ap->port_task, delay);

        /* rc == 0 means that another user is using port task */
        WARN_ON(rc == 0);
}

/**
 * ata_port_flush_task - Flush port_task
 * @ap: The ata_port to flush port_task for
 *
 * After this function completes, port_task is guaranteed not to
 * be running or scheduled.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
        unsigned long flags;

        DPRINTK("ENTER\n");

        spin_lock_irqsave(ap->lock, flags);
        ap->pflags |= ATA_PFLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(ap->lock, flags);

        DPRINTK("flush #1\n");
        cancel_work_sync(&ap->port_task.work); /* akpm: seems unneeded */

        /*
         * At this point, if a task is running, it's guaranteed to see
         * the FLUSH flag; thus, it will never queue pio tasks again.
         * Cancel and flush.
         */
        if (!cancel_delayed_work(&ap->port_task)) {
                if (ata_msg_ctl(ap))
                        ata_port_printk(ap, KERN_DEBUG, "%s: flush #2\n",
                                        __FUNCTION__);
                cancel_work_sync(&ap->port_task.work);
        }

        spin_lock_irqsave(ap->lock, flags);
        ap->pflags &= ~ATA_PFLAG_FLUSH_PORT_TASK;
        spin_unlock_irqrestore(ap->lock, flags);

        if (ata_msg_ctl(ap))
                ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}

static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
{
        struct completion *waiting = qc->private_data;

        complete(waiting);
}

/**
 * ata_exec_internal_sg - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @sg: sg list for the data buffer of the command
 * @n_elem: Number of sg entries
 *
 * Executes libata internal command with timeout.  @tf contains
 * command on entry and result on return.  Timeout and error
 * conditions are reported via return value.  No recovery action
 * is taken after a command times out.  It's the caller's duty to
 * clean up after timeout.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
                              struct ata_taskfile *tf, const u8 *cdb,
                              int dma_dir, struct scatterlist *sg,
                              unsigned int n_elem)
{
        struct ata_port *ap = dev->ap;
        u8 command = tf->command;
        struct ata_queued_cmd *qc;
        unsigned int tag, preempted_tag;
        u32 preempted_sactive, preempted_qc_active;
        DECLARE_COMPLETION_ONSTACK(wait);
        unsigned long flags;
        unsigned int err_mask;
        int rc;

        spin_lock_irqsave(ap->lock, flags);

        /* no internal command while frozen */
        if (ap->pflags & ATA_PFLAG_FROZEN) {
                spin_unlock_irqrestore(ap->lock, flags);
                return AC_ERR_SYSTEM;
        }

        /* initialize internal qc */

        /* XXX: Tag 0 is used for drivers with legacy EH as some
         * drivers choke if any other tag is given.  This breaks
         * ata_tag_internal() test for those drivers.  Don't use new
         * EH stuff without converting to it.
         */
        if (ap->ops->error_handler)
                tag = ATA_TAG_INTERNAL;
        else
                tag = 0;

        if (test_and_set_bit(tag, &ap->qc_allocated))
                BUG();
        qc = __ata_qc_from_tag(ap, tag);

        qc->tag = tag;
        qc->scsicmd = NULL;
        qc->ap = ap;
        qc->dev = dev;
        ata_qc_reinit(qc);

        preempted_tag = ap->active_tag;
        preempted_sactive = ap->sactive;
        preempted_qc_active = ap->qc_active;
        ap->active_tag = ATA_TAG_POISON;
        ap->sactive = 0;
        ap->qc_active = 0;

        /* prepare & issue qc */
        qc->tf = *tf;
        if (cdb)
                memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
        qc->flags |= ATA_QCFLAG_RESULT_TF;
        qc->dma_dir = dma_dir;
        if (dma_dir != DMA_NONE) {
                unsigned int i, buflen = 0;

                for (i = 0; i < n_elem; i++)
                        buflen += sg[i].length;

                ata_sg_init(qc, sg, n_elem);
                qc->nbytes = buflen;
        }

        qc->private_data = &wait;
        qc->complete_fn = ata_qc_complete_internal;

        ata_qc_issue(qc);

        spin_unlock_irqrestore(ap->lock, flags);

        rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

        ata_port_flush_task(ap);

        if (!rc) {
                spin_lock_irqsave(ap->lock, flags);

                /* We're racing with irq here.  If we lose, the
                 * following test prevents us from completing the qc
                 * twice.  If we win, the port is frozen and will be
                 * cleaned up by ->post_internal_cmd().
                 */
                if (qc->flags & ATA_QCFLAG_ACTIVE) {
                        qc->err_mask |= AC_ERR_TIMEOUT;

                        if (ap->ops->error_handler)
                                ata_port_freeze(ap);
                        else
                                ata_qc_complete(qc);

                        if (ata_msg_warn(ap))
                                ata_dev_printk(dev, KERN_WARNING,
                                        "qc timeout (cmd 0x%x)\n", command);
                }

                spin_unlock_irqrestore(ap->lock, flags);
        }

        /* do post_internal_cmd */
        if (ap->ops->post_internal_cmd)
                ap->ops->post_internal_cmd(qc);

        /* perform minimal error analysis */
        if (qc->flags & ATA_QCFLAG_FAILED) {
                if (qc->result_tf.command & (ATA_ERR | ATA_DF))
                        qc->err_mask |= AC_ERR_DEV;

                if (!qc->err_mask)
                        qc->err_mask |= AC_ERR_OTHER;

                if (qc->err_mask & ~AC_ERR_OTHER)
                        qc->err_mask &= ~AC_ERR_OTHER;
        }

        /* finish up */
        spin_lock_irqsave(ap->lock, flags);

        *tf = qc->result_tf;
        err_mask = qc->err_mask;

        ata_qc_free(qc);
        ap->active_tag = preempted_tag;
        ap->sactive = preempted_sactive;
        ap->qc_active = preempted_qc_active;

        /* XXX - Some LLDDs (sata_mv) disable port on command failure.
         * Until those drivers are fixed, we detect the condition
         * here, fail the command with AC_ERR_SYSTEM and reenable the
         * port.
         *
         * Note that this doesn't change any behavior as internal
         * command failure results in disabling the device in the
         * higher layer for LLDDs without new reset/EH callbacks.
         *
         * Kill the following code as soon as those drivers are fixed.
         */
        if (ap->flags & ATA_FLAG_DISABLED) {
                err_mask |= AC_ERR_SYSTEM;
                ata_port_probe(ap);
        }

        spin_unlock_irqrestore(ap->lock, flags);

        return err_mask;
}

/**
 * ata_exec_internal - execute libata internal command
 * @dev: Device to which the command is sent
 * @tf: Taskfile registers for the command and the result
 * @cdb: CDB for packet command
 * @dma_dir: Data transfer direction of the command
 * @buf: Data buffer of the command
 * @buflen: Length of data buffer
 *
 * Wrapper around ata_exec_internal_sg() which takes simple
 * buffer instead of sg list.
 *
 * LOCKING:
 * None.  Should be called with kernel context, might sleep.
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal(struct ata_device *dev,
                           struct ata_taskfile *tf, const u8 *cdb,
                           int dma_dir, void *buf, unsigned int buflen)
{
        struct scatterlist *psg = NULL, sg;
        unsigned int n_elem = 0;

        if (dma_dir != DMA_NONE) {
                WARN_ON(!buf);
                sg_init_one(&sg, buf, buflen);
                psg = &sg;
                n_elem++;
        }

        return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
}

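/*
 * Illustrative sketch (editor's example, not in the original source):
 * a typical internal-command caller fills a taskfile, issues it
 * synchronously and checks the returned AC_ERR_* mask, much as
 * ata_do_simple_cmd() below does for no-data opcodes.  Compiled out.
 */
#if 0
static int example_check_power_mode(struct ata_device *dev)
{
        struct ata_taskfile tf;
        unsigned int err_mask;

        ata_tf_init(dev, &tf);
        tf.command = ATA_CMD_CHK_POWER;
        tf.flags |= ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_NODATA;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
        if (err_mask)
                return -EIO;

        return tf.nsect;        /* 0xff == active/idle */
}
#endif
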
/**
 * ata_do_simple_cmd - execute simple internal command
 * @dev: Device to which the command is sent
 * @cmd: Opcode to execute
 *
 * Execute a 'simple' command that consists only of the opcode
 * @cmd itself, without filling any other registers.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * Zero on success, AC_ERR_* mask on failure
 */
unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
{
        struct ata_taskfile tf;

        ata_tf_init(dev, &tf);

        tf.command = cmd;
        tf.flags |= ATA_TFLAG_DEVICE;
        tf.protocol = ATA_PROT_NODATA;

        return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
}

/**
 * ata_pio_need_iordy - check if iordy needed
 * @adev: ATA device
 *
 * Check if the current speed of the device requires IORDY. Used
 * by various controllers for chip configuration.
 */

unsigned int ata_pio_need_iordy(const struct ata_device *adev)
{
        /* Controller doesn't support IORDY. Probably a pointless check
           as the caller should know this */
        if (adev->ap->flags & ATA_FLAG_NO_IORDY)
                return 0;
        /* PIO3 and higher it is mandatory */
        if (adev->pio_mode > XFER_PIO_2)
                return 1;
        /* We turn it on when possible */
        if (ata_id_has_iordy(adev->id))
                return 1;
        return 0;
}

/**
 * ata_pio_mask_no_iordy - Return the non IORDY mask
 * @adev: ATA device
 *
 * Compute the PIO xfer mask of the modes usable when we are not
 * using iordy.
 */

static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
{
        /* If we have no drive specific rule, then PIO 2 is non IORDY */
        if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
                u16 pio = adev->id[ATA_ID_EIDE_PIO];
                /* Is the speed faster than the drive allows non IORDY ? */
                if (pio) {
                        /* This is cycle times not frequency - watch the logic! */
                        if (pio > 240)  /* PIO2 is 240nS per cycle */
                                return 3 << ATA_SHIFT_PIO;
                        return 7 << ATA_SHIFT_PIO;
                }
        }
        return 3 << ATA_SHIFT_PIO;
}

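/*
 * Illustrative sketch (editor's example, not in the original source):
 * word 68 (ATA_ID_EIDE_PIO) holds the minimum PIO cycle time without
 * flow control in nanoseconds.  A drive reporting 383 ns (slower than
 * PIO2's 240 ns cycle) is limited to modes 0-1 without IORDY
 * (mask 3 << ATA_SHIFT_PIO); a drive reporting 240 ns or less gets
 * modes 0-2 (mask 7 << ATA_SHIFT_PIO).  Compiled out.
 */
#if 0
static void example_no_iordy_mask(void)
{
        u16 pio_cycle = 383;    /* ns, from IDENTIFY word 68 */
        u32 mask = (pio_cycle > 240 ? 3 : 7) << ATA_SHIFT_PIO;

        WARN_ON(mask != (3 << ATA_SHIFT_PIO));
}
#endif
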
/**
 * ata_dev_read_id - Read ID data from the specified device
 * @dev: target device
 * @p_class: pointer to class of the target device (may be changed)
 * @flags: ATA_READID_* flags
 * @id: buffer to read IDENTIFY data into
 *
 * Read ID data from the specified device.  ATA_CMD_ID_ATA is
 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 * devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 * for pre-ATA4 drives.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
                    unsigned int flags, u16 *id)
{
        struct ata_port *ap = dev->ap;
        unsigned int class = *p_class;
        struct ata_taskfile tf;
        unsigned int err_mask = 0;
        const char *reason;
        int tried_spinup = 0;
        int rc;

        if (ata_msg_ctl(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

        ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
        ata_tf_init(dev, &tf);

        switch (class) {
        case ATA_DEV_ATA:
                tf.command = ATA_CMD_ID_ATA;
                break;
        case ATA_DEV_ATAPI:
                tf.command = ATA_CMD_ID_ATAPI;
                break;
        default:
                rc = -ENODEV;
                reason = "unsupported class";
                goto err_out;
        }

        tf.protocol = ATA_PROT_PIO;

        /* Some devices choke if TF registers contain garbage.  Make
         * sure those are properly initialized.
         */
        tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

        /* Device presence detection is unreliable on some
         * controllers.  Always poll IDENTIFY if available.
         */
        tf.flags |= ATA_TFLAG_POLLING;

        err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
                                     id, sizeof(id[0]) * ATA_ID_WORDS);
        if (err_mask) {
                if (err_mask & AC_ERR_NODEV_HINT) {
                        DPRINTK("ata%u.%d: NODEV after polling detection\n",
                                ap->print_id, dev->devno);
                        return -ENOENT;
                }

                rc = -EIO;
                reason = "I/O error";
                goto err_out;
        }

        swap_buf_le16(id, ATA_ID_WORDS);

        /* sanity check */
        rc = -EINVAL;
        reason = "device reports illegal type";

        if (class == ATA_DEV_ATA) {
                if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
                        goto err_out;
        } else {
                if (ata_id_is_ata(id))
                        goto err_out;
        }

        if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
                tried_spinup = 1;
                /*
                 * Drive powered-up in standby mode, and requires a specific
                 * SET_FEATURES spin-up subcommand before it will accept
                 * anything other than the original IDENTIFY command.
                 */
                ata_tf_init(dev, &tf);
                tf.command = ATA_CMD_SET_FEATURES;
                tf.feature = SETFEATURES_SPINUP;
                tf.protocol = ATA_PROT_NODATA;
                tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
                err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
                if (err_mask) {
                        rc = -EIO;
                        reason = "SPINUP failed";
                        goto err_out;
                }
                /*
                 * If the drive initially returned incomplete IDENTIFY info,
                 * we now must reissue the IDENTIFY command.
                 */
                if (id[2] == 0x37c8)
                        goto retry;
        }

        if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
                /*
                 * The exact sequence expected by certain pre-ATA4 drives is:
                 * SRST RESET
                 * IDENTIFY
                 * INITIALIZE DEVICE PARAMETERS
                 * anything else..
                 * Some drives were very specific about that exact sequence.
                 */
                if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
                        err_mask = ata_dev_init_params(dev, id[3], id[6]);
                        if (err_mask) {
                                rc = -EIO;
                                reason = "INIT_DEV_PARAMS failed";
                                goto err_out;
                        }

                        /* current CHS translation info (id[53-58]) might be
                         * changed. reread the identify device info.
                         */
                        flags &= ~ATA_READID_POSTRESET;
                        goto retry;
                }
        }

        *p_class = class;

        return 0;

 err_out:
        if (ata_msg_warn(ap))
                ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
                               "(%s, err_mask=0x%x)\n", reason, err_mask);
        return rc;
}

static inline u8 ata_dev_knobble(struct ata_device *dev)
{
        return ((dev->ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
}

static void ata_dev_config_ncq(struct ata_device *dev,
                               char *desc, size_t desc_sz)
{
        struct ata_port *ap = dev->ap;
        int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

        if (!ata_id_has_ncq(dev->id)) {
                desc[0] = '\0';
                return;
        }
        if (ata_device_blacklisted(dev) & ATA_HORKAGE_NONCQ) {
                snprintf(desc, desc_sz, "NCQ (not used)");
                return;
        }
        if (ap->flags & ATA_FLAG_NCQ) {
                hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
                dev->flags |= ATA_DFLAG_NCQ;
        }

        if (hdepth >= ddepth)
                snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
        else
                snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}

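/*
 * Illustrative sketch (editor's example, not in the original source):
 * the NCQ description reports the device queue depth, qualified by
 * the host limit when the host can queue fewer commands.  A drive
 * advertising depth 32 behind a host that can queue only 31 would be
 * described as "NCQ (depth 31/32)".  Compiled out.
 */
#if 0
static void example_ncq_desc(void)
{
        char desc[20];
        int hdepth = 31, ddepth = 32;

        if (hdepth >= ddepth)
                snprintf(desc, sizeof(desc), "NCQ (depth %d)", ddepth);
        else
                snprintf(desc, sizeof(desc), "NCQ (depth %d/%d)",
                         hdepth, ddepth);
}
#endif
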
/**
 * ata_dev_configure - Configure the specified ATA/ATAPI device
 * @dev: Target device to configure
 *
 * Configure @dev according to @dev->id.  Generic and low-level
 * driver specific fixups are also applied.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * 0 on success, -errno otherwise
 */
int ata_dev_configure(struct ata_device *dev)
{
        struct ata_port *ap = dev->ap;
        int print_info = ap->eh_context.i.flags & ATA_EHI_PRINTINFO;
        const u16 *id = dev->id;
        unsigned int xfer_mask;
        char revbuf[7];         /* XYZ-99\0 */
        char fwrevbuf[ATA_ID_FW_REV_LEN+1];
        char modelbuf[ATA_ID_PROD_LEN+1];
        int rc;

        if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
                ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
                               __FUNCTION__);
                return 0;
        }

        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

        /* set _SDD */
        rc = ata_acpi_push_id(ap, dev->devno);
        if (rc) {
                ata_dev_printk(dev, KERN_WARNING, "failed to set _SDD(%d)\n",
                               rc);
        }

        /* retrieve and execute the ATA task file of _GTF */
        ata_acpi_exec_tfs(ap);

        /* print device capabilities */
        if (ata_msg_probe(ap))
                ata_dev_printk(dev, KERN_DEBUG,
                               "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
                               "85:%04x 86:%04x 87:%04x 88:%04x\n",
                               __FUNCTION__,
                               id[49], id[82], id[83], id[84],
                               id[85], id[86], id[87], id[88]);

        /* initialize to-be-configured parameters */
        dev->flags &= ~ATA_DFLAG_CFG_MASK;
        dev->max_sectors = 0;
        dev->cdb_len = 0;
        dev->n_sectors = 0;
        dev->cylinders = 0;
        dev->heads = 0;
        dev->sectors = 0;

        /*
         * common ATA, ATAPI feature tests
         */

        /* find max transfer mode; for printk only */
        xfer_mask = ata_id_xfermask(id);

        if (ata_msg_probe(ap))
                ata_dump_id(id);

        /* ATA-specific feature tests */
        if (dev->class == ATA_DEV_ATA) {
                if (ata_id_is_cfa(id)) {
                        if (id[162] & 1) /* CPRM may make this media unusable */
                                ata_dev_printk(dev, KERN_WARNING,
                                               "supports DRM functions and may "
                                               "not be fully accessible.\n");
                        snprintf(revbuf, 7, "CFA");
                }
                else
                        snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));

                dev->n_sectors = ata_id_n_sectors(id);
                dev->n_sectors_boot = dev->n_sectors;

                /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
                ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
                                sizeof(fwrevbuf));

                ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
                                sizeof(modelbuf));

                if (dev->id[59] & 0x100)
                        dev->multi_count = dev->id[59] & 0xff;

                if (ata_id_has_lba(id)) {
                        const char *lba_desc;
                        char ncq_desc[20];

                        lba_desc = "LBA";
                        dev->flags |= ATA_DFLAG_LBA;
                        if (ata_id_has_lba48(id)) {
                                dev->flags |= ATA_DFLAG_LBA48;
                                lba_desc = "LBA48";

                                if (dev->n_sectors >= (1UL << 28) &&
                                    ata_id_has_flush_ext(id))
                                        dev->flags |= ATA_DFLAG_FLUSH_EXT;
                        }

                        if (ata_id_hpa_enabled(dev->id))
                                dev->n_sectors = ata_hpa_resize(dev);

                        /* config NCQ */
                        ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));

                        /* print device info to dmesg */
                        if (ata_msg_drv(ap) && print_info) {
                                ata_dev_printk(dev, KERN_INFO,
                                        "%s: %s, %s, max %s\n",
                                        revbuf, modelbuf, fwrevbuf,
                                        ata_mode_string(xfer_mask));
                                ata_dev_printk(dev, KERN_INFO,
                                        "%Lu sectors, multi %u: %s %s\n",
                                        (unsigned long long)dev->n_sectors,
                                        dev->multi_count, lba_desc, ncq_desc);
                        }
                } else {
                        /* CHS */

                        /* Default translation */
                        dev->cylinders  = id[1];
                        dev->heads      = id[3];
                        dev->sectors    = id[6];

                        if (ata_id_current_chs_valid(id)) {
                                /* Current CHS translation is valid. */
                                dev->cylinders = id[54];
                                dev->heads     = id[55];
                                dev->sectors   = id[56];
                        }

                        /* print device info to dmesg */
                        if (ata_msg_drv(ap) && print_info) {
                                ata_dev_printk(dev, KERN_INFO,
                                        "%s: %s, %s, max %s\n",
                                        revbuf, modelbuf, fwrevbuf,
1966 ata_mode_string(xfer_mask));
a84471fe 1967 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
1968 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
1969 (unsigned long long)dev->n_sectors,
1970 dev->multi_count, dev->cylinders,
1971 dev->heads, dev->sectors);
1972 }
07f6f7d0
AL
1973 }
1974
6e7846e9 1975 dev->cdb_len = 16;
1da177e4
LT
1976 }
1977
1978 /* ATAPI-specific feature tests */
2c13b7ce 1979 else if (dev->class == ATA_DEV_ATAPI) {
08a556db
AL
1980 char *cdb_intr_string = "";
1981
1148c3a7 1982 rc = atapi_cdb_len(id);
1da177e4 1983 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 1984 if (ata_msg_warn(ap))
88574551
TH
1985 ata_dev_printk(dev, KERN_WARNING,
1986 "unsupported CDB len\n");
ffeae418 1987 rc = -EINVAL;
1da177e4
LT
1988 goto err_out_nosup;
1989 }
6e7846e9 1990 dev->cdb_len = (unsigned int) rc;
1da177e4 1991
08a556db 1992 if (ata_id_cdb_intr(dev->id)) {
312f7da2 1993 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
1994 cdb_intr_string = ", CDB intr";
1995 }
312f7da2 1996
1da177e4 1997 /* print device info to dmesg */
5afc8142 1998 if (ata_msg_drv(ap) && print_info)
12436c30
TH
1999 ata_dev_printk(dev, KERN_INFO, "ATAPI, max %s%s\n",
2000 ata_mode_string(xfer_mask),
2001 cdb_intr_string);
1da177e4
LT
2002 }
2003
914ed354
TH
2004 /* determine max_sectors */
2005 dev->max_sectors = ATA_MAX_SECTORS;
2006 if (dev->flags & ATA_DFLAG_LBA48)
2007 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2008
93590859
AC
2009 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2010 /* Let the user know. We don't want to disallow opens for
2011 rescue purposes, or in case the vendor is just a blithering
2012 idiot */
2013 if (print_info) {
2014 ata_dev_printk(dev, KERN_WARNING,
2015"Drive reports diagnostics failure. This may indicate a drive\n");
2016 ata_dev_printk(dev, KERN_WARNING,
2017"fault or invalid emulation. Contact drive vendor for information.\n");
2018 }
2019 }
2020
4b2f3ede 2021 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2022 if (ata_dev_knobble(dev)) {
5afc8142 2023 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2024 ata_dev_printk(dev, KERN_INFO,
2025 "applying bridge limits\n");
5a529139 2026 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2027 dev->max_sectors = ATA_MAX_SECTORS;
2028 }
2029
18d6e9d5 2030 if (ata_device_blacklisted(dev) & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2031 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2032 dev->max_sectors);
18d6e9d5 2033
6f23a31d
AL
2034 /* limit ATAPI DMA to R/W commands only */
2035 if (ata_device_blacklisted(dev) & ATA_HORKAGE_DMA_RW_ONLY)
2036 dev->horkage |= ATA_HORKAGE_DMA_RW_ONLY;
2037
4b2f3ede 2038 if (ap->ops->dev_config)
cd0d3bbc 2039 ap->ops->dev_config(dev);
4b2f3ede 2040
0dd4b21f
BP
2041 if (ata_msg_probe(ap))
2042 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2043 __FUNCTION__, ata_chk_status(ap));
ffeae418 2044 return 0;
1da177e4
LT
2045
2046err_out_nosup:
0dd4b21f 2047 if (ata_msg_probe(ap))
88574551
TH
2048 ata_dev_printk(dev, KERN_DEBUG,
2049 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2050 return rc;
1da177e4
LT
2051}

/**
 *	ata_cable_40wire - return 40 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 40 wire cable
 *	detection.
 */

int ata_cable_40wire(struct ata_port *ap)
{
	return ATA_CBL_PATA40;
}

/**
 *	ata_cable_80wire - return 80 wire cable type
 *	@ap: port
 *
 *	Helper method for drivers which want to hardwire 80 wire cable
 *	detection.
 */

int ata_cable_80wire(struct ata_port *ap)
{
	return ATA_CBL_PATA80;
}

/**
 *	ata_cable_unknown - return unknown PATA cable.
 *	@ap: port
 *
 *	Helper method for drivers which have no PATA cable detection.
 */

int ata_cable_unknown(struct ata_port *ap)
{
	return ATA_CBL_PATA_UNK;
}

/**
 *	ata_cable_sata - return SATA cable type
 *	@ap: port
 *
 *	Helper method for drivers which have SATA cables
 */

int ata_cable_sata(struct ata_port *ap)
{
	return ATA_CBL_SATA;
}
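
/*
 * Illustrative usage (hypothetical driver, not part of this file): a
 * low-level driver whose hardware is always cabled one way can point
 * its cable_detect hook at one of the helpers above instead of
 * probing, e.g.:
 *
 *	static struct ata_port_operations my_pata_ops = {
 *		...
 *		.cable_detect	= ata_cable_80wire,
 *	};
 *
 * ata_bus_probe() below invokes ap->ops->cable_detect() once PDIAG-
 * has been released by the identify sequence.
 */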

/**
 *	ata_bus_probe - Reset and probe ATA bus
 *	@ap: Bus to probe
 *
 *	Master ATA bus probing function.  Initiates a hardware-dependent
 *	bus reset, then attempts to identify any devices found on
 *	the bus.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	Zero on success, negative errno otherwise.
 */

int ata_bus_probe(struct ata_port *ap)
{
	unsigned int classes[ATA_MAX_DEVICES];
	int tries[ATA_MAX_DEVICES];
	int i, rc;
	struct ata_device *dev;

	ata_port_probe(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		tries[i] = ATA_PROBE_MAX_TRIES;

 retry:
	/* reset and determine device classes */
	ap->ops->phy_reset(ap);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!(ap->flags & ATA_FLAG_DISABLED) &&
		    dev->class != ATA_DEV_UNKNOWN)
			classes[dev->devno] = dev->class;
		else
			classes[dev->devno] = ATA_DEV_NONE;

		dev->class = ATA_DEV_UNKNOWN;
	}

	ata_port_probe(ap);

	/* after the reset the device state is PIO 0 and the controller
	   state is undefined. Record the mode */

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ap->device[i].pio_mode = XFER_PIO_0;

	/* read IDENTIFY page and configure devices. We have to do the identify
	   specific sequence bass-ackwards so that PDIAG- is released by
	   the slave device */

	for (i = ATA_MAX_DEVICES - 1; i >= 0; i--) {
		dev = &ap->device[i];

		if (tries[i])
			dev->class = classes[i];

		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
				     dev->id);
		if (rc)
			goto fail;
	}

	/* Now ask for the cable type as PDIAG- should have been released */
	if (ap->ops->cable_detect)
		ap->cbl = ap->ops->cable_detect(ap);

	/* After the identify sequence we can now set up the devices. We do
	   this in the normal order so that the user doesn't get confused */

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		ap->eh_context.i.flags |= ATA_EHI_PRINTINFO;
		rc = ata_dev_configure(dev);
		ap->eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
		if (rc)
			goto fail;
	}

	/* configure transfer mode */
	rc = ata_set_mode(ap, &dev);
	if (rc)
		goto fail;

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		if (ata_dev_enabled(&ap->device[i]))
			return 0;

	/* no device present, disable port */
	ata_port_disable(ap);
	ap->ops->port_disable(ap);
	return -ENODEV;

 fail:
	tries[dev->devno]--;

	switch (rc) {
	case -EINVAL:
		/* eeek, something went very wrong, give up */
		tries[dev->devno] = 0;
		break;

	case -ENODEV:
		/* give it just one more chance */
		tries[dev->devno] = min(tries[dev->devno], 1);
	case -EIO:
		if (tries[dev->devno] == 1) {
			/* This is the last chance, better to slow
			 * down than lose it.
			 */
			sata_down_spd_limit(ap);
			ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
		}
	}

	if (!tries[dev->devno])
		ata_dev_disable(dev);

	goto retry;
}

/**
 *	ata_port_probe - Mark port as enabled
 *	@ap: Port for which we indicate enablement
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is enabled.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_DISABLED;
}

/**
 *	sata_print_link_status - Print SATA link status
 *	@ap: SATA port to printk link status about
 *
 *	This function prints link speed and status of a SATA link.
 *
 *	LOCKING:
 *	None.
 */
void sata_print_link_status(struct ata_port *ap)
{
	u32 sstatus, scontrol, tmp;

	if (sata_scr_read(ap, SCR_STATUS, &sstatus))
		return;
	sata_scr_read(ap, SCR_CONTROL, &scontrol);

	if (ata_port_online(ap)) {
		tmp = (sstatus >> 4) & 0xf;
		ata_port_printk(ap, KERN_INFO,
				"SATA link up %s (SStatus %X SControl %X)\n",
				sata_spd_string(tmp), sstatus, scontrol);
	} else {
		ata_port_printk(ap, KERN_INFO,
				"SATA link down (SStatus %X SControl %X)\n",
				sstatus, scontrol);
	}
}
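
/*
 * Illustrative note (not from the original source): bits 7:4 of SStatus
 * hold the negotiated interface speed (SPD), which is what the
 * "(sstatus >> 4) & 0xf" above extracts.  Per the SATA spec, SPD == 1
 * corresponds to 1.5 Gbps and SPD == 2 to 3.0 Gbps, so an SStatus of
 * 0x123 would be reported as a link up at 3.0 Gbps.
 */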

/**
 *	__sata_phy_reset - Wake/reset a low-level SATA PHY
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function issues commands to standard SATA Sxxx
 *	PHY registers, to wake up the phy (and device), and
 *	clear any reset condition.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		sata_scr_write_flush(ap, SCR_CONTROL, 0x301);
		/* Couldn't find anything in SATA I/II specs, but
		 * AHCI-1.1 10.4.2 says at least 1 ms. */
		mdelay(1);
	}
	/* phy wake/clear reset */
	sata_scr_write_flush(ap, SCR_CONTROL, 0x300);

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sata_scr_read(ap, SCR_STATUS, &sstatus);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* print link status */
	sata_print_link_status(ap);

	/* TODO: phy layer with polling, timeouts, etc. */
	if (!ata_port_offline(ap))
		ata_port_probe(ap);
	else
		ata_port_disable(ap);

	if (ap->flags & ATA_FLAG_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 *	sata_phy_reset - Reset SATA bus.
 *	@ap: SATA port associated with target SATA PHY.
 *
 *	This function resets the SATA bus, and then probes
 *	the bus for devices.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 *	ata_dev_pair - return other device on cable
 *	@adev: device
 *
 *	Obtain the other device on the same cable, or NULL if none
 *	is present.
 */

struct ata_device *ata_dev_pair(struct ata_device *adev)
{
	struct ata_port *ap = adev->ap;
	struct ata_device *pair = &ap->device[1 - adev->devno];
	if (!ata_dev_enabled(pair))
		return NULL;
	return pair;
}

/**
 *	ata_port_disable - Disable port.
 *	@ap: Port to be disabled.
 *
 *	Modify @ap data structure such that the system
 *	thinks that the entire port is disabled, and should
 *	never attempt to probe or communicate with devices
 *	on this port.
 *
 *	LOCKING: host lock, or some other form of
 *	serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_DISABLED;
}

/**
 *	sata_down_spd_limit - adjust SATA spd limit downward
 *	@ap: Port to adjust SATA spd limit for
 *
 *	Adjust SATA spd limit of @ap downward.  Note that this
 *	function only adjusts the limit.  The change must be applied
 *	using sata_set_spd().
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int sata_down_spd_limit(struct ata_port *ap)
{
	u32 sstatus, spd, mask;
	int rc, highbit;

	rc = sata_scr_read(ap, SCR_STATUS, &sstatus);
	if (rc)
		return rc;

	mask = ap->sata_spd_limit;
	if (mask <= 1)
		return -EINVAL;
	highbit = fls(mask) - 1;
	mask &= ~(1 << highbit);

	spd = (sstatus >> 4) & 0xf;
	if (spd <= 1)
		return -EINVAL;
	spd--;
	mask &= (1 << spd) - 1;
	if (!mask)
		return -EINVAL;

	ap->sata_spd_limit = mask;

	ata_port_printk(ap, KERN_WARNING, "limiting SATA link speed to %s\n",
			sata_spd_string(fls(mask)));

	return 0;
}
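
/*
 * Illustrative walk-through (not from the original source): suppose
 * sata_spd_limit is 0x3 (Gen1 and Gen2 allowed) and the link came up
 * at Gen2 (SPD field == 2).  fls(0x3) - 1 == 1, so bit 1 is cleared,
 * leaving 0x1; the "(1 << spd) - 1" clamp with the decremented spd == 1
 * keeps 0x1, so the new limit allows Gen1 only and a subsequent
 * hardreset will bring the link up at the lower speed.
 */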

static int __sata_set_spd_needed(struct ata_port *ap, u32 *scontrol)
{
	u32 spd, limit;

	if (ap->sata_spd_limit == UINT_MAX)
		limit = 0;
	else
		limit = fls(ap->sata_spd_limit);

	spd = (*scontrol >> 4) & 0xf;
	*scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);

	return spd != limit;
}

/**
 *	sata_set_spd_needed - is SATA spd configuration needed
 *	@ap: Port in question
 *
 *	Test whether the spd limit in SControl matches
 *	@ap->sata_spd_limit.  This function is used to determine
 *	whether hardreset is necessary to apply SATA spd
 *	configuration.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	1 if SATA spd configuration is needed, 0 otherwise.
 */
int sata_set_spd_needed(struct ata_port *ap)
{
	u32 scontrol;

	if (sata_scr_read(ap, SCR_CONTROL, &scontrol))
		return 0;

	return __sata_set_spd_needed(ap, &scontrol);
}

/**
 *	sata_set_spd - set SATA spd according to spd limit
 *	@ap: Port to set SATA spd for
 *
 *	Set SATA spd of @ap according to sata_spd_limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 if spd doesn't need to be changed, 1 if spd has been
 *	changed.  Negative errno if SCR registers are inaccessible.
 */
int sata_set_spd(struct ata_port *ap)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	if (!__sata_set_spd_needed(ap, &scontrol))
		return 0;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	return 1;
}
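
/*
 * Illustrative note (not from the original source): bits 7:4 of
 * SControl cap the speed the phy may negotiate, which is the field
 * __sata_set_spd_needed() rewrites above.  For example, forcing a
 * Gen1-only link means sata_spd_limit == 0x1, so limit == fls(0x1)
 * == 1 and an SControl of 0x300 becomes 0x310, to be applied by the
 * next hardreset.
 */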

/*
 * This mode timing computation functionality is ported over from
 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
 */
/*
 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
 * for UDMA6, which is currently supported only by Maxtor drives.
 *
 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
 */

static const struct ata_timing ata_timing[] = {

	{ XFER_UDMA_6,     0,   0,   0,   0,   0,   0,   0,  15 },
	{ XFER_UDMA_5,     0,   0,   0,   0,   0,   0,   0,  20 },
	{ XFER_UDMA_4,     0,   0,   0,   0,   0,   0,   0,  30 },
	{ XFER_UDMA_3,     0,   0,   0,   0,   0,   0,   0,  45 },

	{ XFER_MW_DMA_4,  25,   0,   0,   0,  55,  20,  80,   0 },
	{ XFER_MW_DMA_3,  25,   0,   0,   0,  65,  25, 100,   0 },
	{ XFER_UDMA_2,     0,   0,   0,   0,   0,   0,   0,  60 },
	{ XFER_UDMA_1,     0,   0,   0,   0,   0,   0,   0,  80 },
	{ XFER_UDMA_0,     0,   0,   0,   0,   0,   0,   0, 120 },

/*	{ XFER_UDMA_SLOW,  0,   0,   0,   0,   0,   0,   0, 150 }, */

	{ XFER_MW_DMA_2,  25,   0,   0,   0,  70,  25, 120,   0 },
	{ XFER_MW_DMA_1,  45,   0,   0,   0,  80,  50, 150,   0 },
	{ XFER_MW_DMA_0,  60,   0,   0,   0, 215, 215, 480,   0 },

	{ XFER_SW_DMA_2,  60,   0,   0,   0, 120, 120, 240,   0 },
	{ XFER_SW_DMA_1,  90,   0,   0,   0, 240, 240, 480,   0 },
	{ XFER_SW_DMA_0, 120,   0,   0,   0, 480, 480, 960,   0 },

	{ XFER_PIO_6,     10,  55,  20,  80,  55,  20,  80,   0 },
	{ XFER_PIO_5,     15,  65,  25, 100,  65,  25, 100,   0 },
	{ XFER_PIO_4,     25,  70,  25, 120,  70,  25, 120,   0 },
	{ XFER_PIO_3,     30,  80,  70, 180,  80,  70, 180,   0 },

	{ XFER_PIO_2,     30, 290,  40, 330, 100,  90, 240,   0 },
	{ XFER_PIO_1,     50, 290,  93, 383, 125, 100, 383,   0 },
	{ XFER_PIO_0,     70, 290, 240, 600, 165, 150, 600,   0 },

/*	{ XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960,   0 }, */

	{ 0xFF }
};

#define ENOUGH(v,unit)		(((v)-1)/(unit)+1)
#define EZ(v,unit)		((v)?ENOUGH(v,unit):0)
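
/*
 * Illustrative arithmetic (not from the original source): ENOUGH() is
 * a ceiling division.  ata_timing_quantize() below scales nanosecond
 * timings by 1000 before dividing, so with a bus clock period T of
 * 30000 (30 ns in the caller's units) a 70 ns setup time quantizes to
 * EZ(70 * 1000, 30000) == (70000 - 1) / 30000 + 1 == 3 clocks --
 * requirements are always rounded up, never truncated.
 */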

static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
{
	q->setup   = EZ(t->setup   * 1000,  T);
	q->act8b   = EZ(t->act8b   * 1000,  T);
	q->rec8b   = EZ(t->rec8b   * 1000,  T);
	q->cyc8b   = EZ(t->cyc8b   * 1000,  T);
	q->active  = EZ(t->active  * 1000,  T);
	q->recover = EZ(t->recover * 1000,  T);
	q->cycle   = EZ(t->cycle   * 1000,  T);
	q->udma    = EZ(t->udma    * 1000, UT);
}

void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
		      struct ata_timing *m, unsigned int what)
{
	if (what & ATA_TIMING_SETUP  ) m->setup   = max(a->setup,   b->setup);
	if (what & ATA_TIMING_ACT8B  ) m->act8b   = max(a->act8b,   b->act8b);
	if (what & ATA_TIMING_REC8B  ) m->rec8b   = max(a->rec8b,   b->rec8b);
	if (what & ATA_TIMING_CYC8B  ) m->cyc8b   = max(a->cyc8b,   b->cyc8b);
	if (what & ATA_TIMING_ACTIVE ) m->active  = max(a->active,  b->active);
	if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
	if (what & ATA_TIMING_CYCLE  ) m->cycle   = max(a->cycle,   b->cycle);
	if (what & ATA_TIMING_UDMA   ) m->udma    = max(a->udma,    b->udma);
}

static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
{
	const struct ata_timing *t;

	for (t = ata_timing; t->mode != speed; t++)
		if (t->mode == 0xFF)
			return NULL;
	return t;
}

int ata_timing_compute(struct ata_device *adev, unsigned short speed,
		       struct ata_timing *t, int T, int UT)
{
	const struct ata_timing *s;
	struct ata_timing p;

	/*
	 * Find the mode.
	 */

	if (!(s = ata_timing_find_mode(speed)))
		return -EINVAL;

	memcpy(t, s, sizeof(*s));

	/*
	 * If the drive is an EIDE drive, it can tell us it needs extended
	 * PIO/MW_DMA cycle timing.
	 */

	if (adev->id[ATA_ID_FIELD_VALID] & 2) {	/* EIDE drive */
		memset(&p, 0, sizeof(p));
		if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
			if (speed <= XFER_PIO_2)
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
			else
				p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
		} else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
			p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
		}
		ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
	}

	/*
	 * Convert the timing to bus clock counts.
	 */

	ata_timing_quantize(t, t, T, UT);

	/*
	 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
	 * S.M.A.R.T and some other commands. We have to ensure that the
	 * DMA cycle timing is slower than or equal to the fastest PIO
	 * timing.
	 */

	if (speed > XFER_PIO_6) {
		ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
		ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
	}

	/*
	 * Lengthen active & recovery time so that cycle time is correct.
	 */

	if (t->act8b + t->rec8b < t->cyc8b) {
		t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
		t->rec8b = t->cyc8b - t->act8b;
	}

	if (t->active + t->recover < t->cycle) {
		t->active += (t->cycle - (t->active + t->recover)) / 2;
		t->recover = t->cycle - t->active;
	}

	/* In a few cases quantisation may produce enough errors to
	   leave t->cycle too low for the sum of active and recovery
	   if so we must correct this */
	if (t->active + t->recover > t->cycle)
		t->cycle = t->active + t->recover;

	return 0;
}
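
/*
 * Illustrative usage (hypothetical caller, not part of this file): a
 * PATA low-level driver running its timing registers off a 33 MHz
 * clock would typically convert the clock to a period and let
 * ata_timing_compute() do the rest:
 *
 *	struct ata_timing t;
 *	int T = 1000000000 / 33333;	// clock period, ~30 ns
 *	int rc = ata_timing_compute(adev, adev->pio_mode, &t, T, T);
 *
 * On success, t.setup/t.active/t.recover etc. hold clock counts ready
 * to be packed into the controller's timing registers.
 */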

/**
 *	ata_down_xfermask_limit - adjust dev xfer masks downward
 *	@dev: Device to adjust xfer masks
 *	@sel: ATA_DNXFER_* selector
 *
 *	Adjust xfer masks of @dev downward.  Note that this function
 *	does not apply the change.  Invoking ata_set_mode() afterwards
 *	will apply the limit.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure
 */
int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
{
	char buf[32];
	unsigned int orig_mask, xfer_mask;
	unsigned int pio_mask, mwdma_mask, udma_mask;
	int quiet, highbit;

	quiet = !!(sel & ATA_DNXFER_QUIET);
	sel &= ~ATA_DNXFER_QUIET;

	xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
						  dev->mwdma_mask,
						  dev->udma_mask);
	ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);

	switch (sel) {
	case ATA_DNXFER_PIO:
		highbit = fls(pio_mask) - 1;
		pio_mask &= ~(1 << highbit);
		break;

	case ATA_DNXFER_DMA:
		if (udma_mask) {
			highbit = fls(udma_mask) - 1;
			udma_mask &= ~(1 << highbit);
			if (!udma_mask)
				return -ENOENT;
		} else if (mwdma_mask) {
			highbit = fls(mwdma_mask) - 1;
			mwdma_mask &= ~(1 << highbit);
			if (!mwdma_mask)
				return -ENOENT;
		}
		break;

	case ATA_DNXFER_40C:
		udma_mask &= ATA_UDMA_MASK_40C;
		break;

	case ATA_DNXFER_FORCE_PIO0:
		pio_mask &= 1;
	case ATA_DNXFER_FORCE_PIO:
		mwdma_mask = 0;
		udma_mask = 0;
		break;

	default:
		BUG();
	}

	xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);

	if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
		return -ENOENT;

	if (!quiet) {
		if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
			snprintf(buf, sizeof(buf), "%s:%s",
				 ata_mode_string(xfer_mask),
				 ata_mode_string(xfer_mask & ATA_MASK_PIO));
		else
			snprintf(buf, sizeof(buf), "%s",
				 ata_mode_string(xfer_mask));

		ata_dev_printk(dev, KERN_WARNING,
			       "limiting speed to %s\n", buf);
	}

	ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
			    &dev->udma_mask);

	return 0;
}
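
/*
 * Illustrative note (not from the original source): the selectors
 * compose with ATA_DNXFER_QUIET, so a caller can demote a flaky
 * device without extra log noise, e.g.:
 *
 *	ata_down_xfermask_limit(dev, ATA_DNXFER_PIO | ATA_DNXFER_QUIET);
 *
 * which drops the highest advertised PIO mode and defers the actual
 * reprogramming to the next ata_set_mode() pass.
 */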

static int ata_dev_set_mode(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->ap->eh_context;
	unsigned int err_mask;
	int rc;

	dev->flags &= ~ATA_DFLAG_PIO;
	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	err_mask = ata_dev_set_xfermode(dev);
	/* Old CFA may refuse this command, which is just fine */
	if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
		err_mask &= ~AC_ERR_DEV;

	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
			       "(err_mask=0x%x)\n", err_mask);
		return -EIO;
	}

	ehc->i.flags |= ATA_EHI_POST_SETMODE;
	rc = ata_dev_revalidate(dev, 0);
	ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
	if (rc)
		return rc;

	DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
		dev->xfer_shift, (int)dev->xfer_mode);

	ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
		       ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
	return 0;
}

/**
 *	ata_do_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Standard implementation of the function used to tune and set
 *	ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_dev_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */

int ata_do_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	struct ata_device *dev;
	int i, rc = 0, used_dma = 0, found = 0;

	/* step 1: calculate xfer_mask */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		unsigned int pio_mask, dma_mask;

		dev = &ap->device[i];

		if (!ata_dev_enabled(dev))
			continue;

		ata_dev_xfermask(dev);

		pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
		dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
		dev->pio_mode = ata_xfer_mask2mode(pio_mask);
		dev->dma_mode = ata_xfer_mask2mode(dma_mask);

		found = 1;
		if (dev->dma_mode)
			used_dma = 1;
	}
	if (!found)
		goto out;

	/* step 2: always set host PIO timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];
		if (!ata_dev_enabled(dev))
			continue;

		if (!dev->pio_mode) {
			ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
			rc = -EINVAL;
			goto out;
		}

		dev->xfer_mode = dev->pio_mode;
		dev->xfer_shift = ATA_SHIFT_PIO;
		if (ap->ops->set_piomode)
			ap->ops->set_piomode(ap, dev);
	}

	/* step 3: set host DMA timings */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		if (!ata_dev_enabled(dev) || !dev->dma_mode)
			continue;

		dev->xfer_mode = dev->dma_mode;
		dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
		if (ap->ops->set_dmamode)
			ap->ops->set_dmamode(ap, dev);
	}

	/* step 4: update devices' xfer mode */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		dev = &ap->device[i];

		/* don't update suspended devices' xfer mode */
		if (!ata_dev_enabled(dev))
			continue;

		rc = ata_dev_set_mode(dev);
		if (rc)
			goto out;
	}

	/* Record simplex status. If we selected DMA then the other
	 * host channels are not permitted to do so.
	 */
	if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
		ap->host->simplex_claimed = ap;

	/* step 5: chip specific finalisation */
	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);
 out:
	if (rc)
		*r_failed_dev = dev;
	return rc;
}

/**
 *	ata_set_mode - Program timings and issue SET FEATURES - XFER
 *	@ap: port on which timings will be programmed
 *	@r_failed_dev: out parameter for failed device
 *
 *	Set ATA device disk transfer mode (PIO3, UDMA6, etc.).  If
 *	ata_set_mode() fails, pointer to the failing device is
 *	returned in @r_failed_dev.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *
 *	RETURNS:
 *	0 on success, negative errno otherwise
 */
int ata_set_mode(struct ata_port *ap, struct ata_device **r_failed_dev)
{
	/* has private set_mode? */
	if (ap->ops->set_mode)
		return ap->ops->set_mode(ap, r_failed_dev);
	return ata_do_set_mode(ap, r_failed_dev);
}
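
/*
 * Illustrative note (hypothetical driver, not part of this file): a
 * controller with unusual mode programming can override the whole
 * sequence by supplying its own hook, falling back to the standard
 * helper for the common work:
 *
 *	static int my_set_mode(struct ata_port *ap,
 *			       struct ata_device **r_failed)
 *	{
 *		// fix up masks here, then reuse the stock implementation
 *		return ata_do_set_mode(ap, r_failed);
 *	}
 *
 * and setting .set_mode = my_set_mode in its ata_port_operations.
 */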

/**
 *	ata_tf_to_host - issue ATA taskfile to host controller
 *	@ap: port to which command is being issued
 *	@tf: ATA taskfile register set
 *
 *	Issues ATA taskfile register set to ATA host controller,
 *	with proper synchronization with interrupt handler and
 *	other threads.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */

static inline void ata_tf_to_host(struct ata_port *ap,
				  const struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 *	ata_busy_sleep - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@tmout_pat: impatience timeout
 *	@tmout: overall timeout
 *
 *	Sleep until ATA Status register bit BSY clears,
 *	or a timeout occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_busy_sleep(struct ata_port *ap,
		   unsigned long tmout_pat, unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status != 0xff && (status & ATA_BUSY))
		ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);

	timeout = timer_start + tmout;
	while (status != 0xff && (status & ATA_BUSY) &&
	       time_before(jiffies, timeout)) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status == 0xff)
		return -ENODEV;

	if (status & ATA_BUSY) {
		ata_port_printk(ap, KERN_ERR, "port failed to respond "
				"(%lu secs, Status 0x%x)\n",
				tmout / HZ, status);
		return -EBUSY;
	}

	return 0;
}

/**
 *	ata_wait_ready - sleep until BSY clears, or timeout
 *	@ap: port containing status register to be polled
 *	@deadline: deadline jiffies for the operation
 *
 *	Sleep until ATA Status register bit BSY clears, or timeout
 *	occurs.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
{
	unsigned long start = jiffies;
	int warned = 0;

	while (1) {
		u8 status = ata_chk_status(ap);
		unsigned long now = jiffies;

		if (!(status & ATA_BUSY))
			return 0;
		if (status == 0xff)
			return -ENODEV;
		if (time_after(now, deadline))
			return -EBUSY;

		if (!warned && time_after(now, start + 5 * HZ) &&
		    (deadline - now > 3 * HZ)) {
			ata_port_printk(ap, KERN_WARNING,
				"port is slow to respond, please be patient "
				"(Status 0x%x)\n", status);
			warned = 1;
		}

		msleep(50);
	}
}
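
/*
 * Illustrative usage (not from the original source): callers express
 * timeouts as absolute deadlines, so waiting up to two seconds for
 * BSY to clear looks like:
 *
 *	rc = ata_wait_ready(ap, jiffies + msecs_to_jiffies(2000));
 *
 * which lets nested helpers share one overall deadline instead of
 * each consuming its own private timeout.
 */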

static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
			      unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	int rc, ret = 0;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		nsect = ioread8(ioaddr->nsect_addr);
		lbal = ioread8(ioaddr->lbal_addr);
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, deadline))
			return -EBUSY;
		msleep(50);	/* give drive a breather */
	}
	if (dev1) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			if (rc != -ENODEV)
				return rc;
			ret = rc;
		}
	}

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);

	return ret;
}

static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
			     unsigned long deadline)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);

	/* software reset.  causes dev0 to be selected */
	iowrite8(ap->ctl, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
	udelay(20);	/* FIXME: flush */
	iowrite8(ap->ctl, ioaddr->ctl_addr);

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 *
	 * Old drivers/ide uses the 2 ms rule and then waits for ready.
	 */
	msleep(150);

	/* Before we perform post reset processing we want to see if
	 * the bus shows 0xFF because the odd clown forgets the D7
	 * pulldown resistor.
	 */
	if (ata_check_status(ap) == 0xFF)
		return -ENODEV;

	return ata_bus_post_reset(ap, devmask, deadline);
}

/**
 *	ata_bus_reset - reset host port and associated ATA channel
 *	@ap: port to reset
 *
 *	This is typically the first time we actually start issuing
 *	commands to the ATA channel.  We wait for BSY to clear, then
 *	issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 *	result.  Determine what devices, if any, are on the channel
 *	by looking at the device 0/1 error register.  Look at the signature
 *	stored in each device's taskfile registers, to determine if
 *	the device is ATA or ATAPI.
 *
 *	LOCKING:
 *	PCI/etc. bus probe sem.
 *	Obtains host lock.
 *
 *	SIDE EFFECTS:
 *	Sets ATA_FLAG_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, devmask = 0;
	int rc;

	DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST) {
		rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
		if (rc && rc != -ENODEV)
			goto err_out;
	}

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	ap->device[0].class = ata_dev_try_classify(ap, 0, &err);
	if ((slave_possible) && (err != 0x81))
		ap->device[1].class = ata_dev_try_classify(ap, 1, &err);

	/* re-enable interrupts */
	ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		iowrite8(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	ata_port_printk(ap, KERN_ERR, "disabling port\n");
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}

/**
 *	sata_phy_debounce - debounce SATA phy status
 *	@ap: ATA port to debounce SATA phy status for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Make sure SStatus of @ap reaches stable state, determined by
 *	holding the same value where DET is not 1 for @duration polled
 *	every @interval, before @timeout.  @timeout constrains the
 *	beginning of the stable state.  Because DET gets stuck at 1 on
 *	some controllers after hot unplugging, this function waits
 *	until timeout then returns 0 if DET is stable at 1.
 *
 *	@timeout is further limited by @deadline.  The sooner of the
 *	two is used.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_debounce(struct ata_port *ap, const unsigned long *params,
		      unsigned long deadline)
{
	unsigned long interval_msec = params[0];
	unsigned long duration = msecs_to_jiffies(params[1]);
	unsigned long last_jiffies, t;
	u32 last, cur;
	int rc;

	t = jiffies + msecs_to_jiffies(params[2]);
	if (time_before(t, deadline))
		deadline = t;

	if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
		return rc;
	cur &= 0xf;

	last = cur;
	last_jiffies = jiffies;

	while (1) {
		msleep(interval_msec);
		if ((rc = sata_scr_read(ap, SCR_STATUS, &cur)))
			return rc;
		cur &= 0xf;

		/* DET stable? */
		if (cur == last) {
			if (cur == 1 && time_before(jiffies, deadline))
				continue;
			if (time_after(jiffies, last_jiffies + duration))
				return 0;
			continue;
		}

		/* unstable, start over */
		last = cur;
		last_jiffies = jiffies;

		/* check deadline */
		if (time_after(jiffies, deadline))
			return -EBUSY;
	}
}
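
/*
 * Illustrative note (hypothetical values, not from the original
 * source): with params of { 10, 100, 5000 } the loop above samples
 * DET every 10 ms and succeeds once the value has held steady for
 * 100 ms, giving up if no stable state has begun within 5 seconds
 * (or by @deadline, whichever comes first).
 */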

/**
 *	sata_phy_resume - resume SATA phy
 *	@ap: ATA port to resume SATA phy for
 *	@params: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	Resume SATA phy of @ap and debounce it.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int sata_phy_resume(struct ata_port *ap, const unsigned long *params,
		    unsigned long deadline)
{
	u32 scontrol;
	int rc;

	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		return rc;

	scontrol = (scontrol & 0x0f0) | 0x300;

	if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
		return rc;

	/* Some PHYs react badly if SStatus is pounded immediately
	 * after resuming.  Delay 200ms before debouncing.
	 */
	msleep(200);

	return sata_phy_debounce(ap, params, deadline);
}

/**
 *	ata_std_prereset - prepare for reset
 *	@ap: ATA port to be reset
 *	@deadline: deadline jiffies for the operation
 *
 *	@ap is about to be reset.  Initialize it.  Failure from
 *	prereset makes libata abort the whole reset sequence and give
 *	up that port, so prereset should be best-effort.  It does its
 *	best to prepare for the reset sequence but if things go wrong,
 *	it should just whine, not fail.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_prereset(struct ata_port *ap, unsigned long deadline)
{
	struct ata_eh_context *ehc = &ap->eh_context;
	const unsigned long *timing = sata_ehc_deb_timing(ehc);
	int rc;

	/* handle link resume */
	if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
	    (ap->flags & ATA_FLAG_HRST_TO_RESUME))
		ehc->i.action |= ATA_EH_HARDRESET;

	/* if we're about to do hardreset, nothing more to do */
	if (ehc->i.action & ATA_EH_HARDRESET)
		return 0;

	/* if SATA, resume phy */
	if (ap->cbl == ATA_CBL_SATA) {
		rc = sata_phy_resume(ap, timing, deadline);
		/* whine about phy resume failure but proceed */
		if (rc && rc != -EOPNOTSUPP)
			ata_port_printk(ap, KERN_WARNING, "failed to resume "
					"link for reset (errno=%d)\n", rc);
	}

	/* Wait for !BSY if the controller can wait for the first D2H
	 * Reg FIS and we don't know that no device is attached.
	 */
	if (!(ap->flags & ATA_FLAG_SKIP_D2H_BSY) && !ata_port_offline(ap)) {
		rc = ata_wait_ready(ap, deadline);
		if (rc) {
			ata_port_printk(ap, KERN_WARNING, "device not ready "
					"(errno=%d), forcing hardreset\n", rc);
			ehc->i.action |= ATA_EH_HARDRESET;
		}
	}

	return 0;
}

/**
 *	ata_std_softreset - reset host port via ATA SRST
 *	@ap: port to reset
 *	@classes: resulting classes of attached devices
 *	@deadline: deadline jiffies for the operation
 *
 *	Reset host port using ATA SRST.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_std_softreset(struct ata_port *ap, unsigned int *classes,
		      unsigned long deadline)
{
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	unsigned int devmask = 0;
	int rc;
	u8 err;

	DPRINTK("ENTER\n");

	if (ata_port_offline(ap)) {
		classes[0] = ATA_DEV_NONE;
		goto out;
	}

	/* determine if device 0/1 are present */
	if (ata_devchk(ap, 0))
		devmask |= (1 << 0);
	if (slave_possible && ata_devchk(ap, 1))
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	DPRINTK("about to softreset, devmask=%x\n", devmask);
	rc = ata_bus_softreset(ap, devmask, deadline);
	/* if link is occupied, -ENODEV too is an error */
	if (rc && (rc != -ENODEV || sata_scr_valid(ap))) {
		ata_port_printk(ap, KERN_ERR, "SRST failed (errno=%d)\n", rc);
		return rc;
	}

	/* determine by signature whether we have ATA or ATAPI devices */
	classes[0] = ata_dev_try_classify(ap, 0, &err);
	if (slave_possible && err != 0x81)
		classes[1] = ata_dev_try_classify(ap, 1, &err);

 out:
	DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
	return 0;
}

/**
 *	sata_port_hardreset - reset port via SATA phy reset
 *	@ap: port to reset
 *	@timing: timing parameters { interval, duration, timeout } in msec
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_port_hardreset(struct ata_port *ap, const unsigned long *timing,
			unsigned long deadline)
{
	u32 scontrol;
	int rc;

	DPRINTK("ENTER\n");

	if (sata_set_spd_needed(ap)) {
		/* SATA spec says nothing about how to reconfigure
		 * spd.  To be on the safe side, turn off phy during
		 * reconfiguration.  This works for at least ICH7 AHCI
		 * and Sil3124.
		 */
		if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
			goto out;

		scontrol = (scontrol & 0x0f0) | 0x304;

		if ((rc = sata_scr_write(ap, SCR_CONTROL, scontrol)))
			goto out;

		sata_set_spd(ap);
	}

	/* issue phy wake/reset */
	if ((rc = sata_scr_read(ap, SCR_CONTROL, &scontrol)))
		goto out;

	scontrol = (scontrol & 0x0f0) | 0x301;

	if ((rc = sata_scr_write_flush(ap, SCR_CONTROL, scontrol)))
		goto out;

	/* Couldn't find anything in SATA I/II specs, but AHCI-1.1
	 * 10.4.2 says at least 1 ms.
	 */
	msleep(1);

	/* bring phy back */
	rc = sata_phy_resume(ap, timing, deadline);
 out:
	DPRINTK("EXIT, rc=%d\n", rc);
	return rc;
}

/**
 *	sata_std_hardreset - reset host port via SATA phy reset
 *	@ap: port to reset
 *	@class: resulting class of attached device
 *	@deadline: deadline jiffies for the operation
 *
 *	SATA phy-reset host port using DET bits of SControl register,
 *	wait for !BSY and classify the attached device.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int sata_std_hardreset(struct ata_port *ap, unsigned int *class,
		       unsigned long deadline)
{
	const unsigned long *timing = sata_ehc_deb_timing(&ap->eh_context);
	int rc;

	DPRINTK("ENTER\n");

	/* do hardreset */
	rc = sata_port_hardreset(ap, timing, deadline);
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	/* TODO: phy layer with polling, timeouts, etc. */
	if (ata_port_offline(ap)) {
		*class = ATA_DEV_NONE;
		DPRINTK("EXIT, link offline\n");
		return 0;
	}

	/* wait a while before checking status, see SRST for more info */
	msleep(150);

	rc = ata_wait_ready(ap, deadline);
	/* link occupied, -ENODEV too is an error */
	if (rc) {
		ata_port_printk(ap, KERN_ERR,
				"COMRESET failed (errno=%d)\n", rc);
		return rc;
	}

	ap->ops->dev_select(ap, 0);	/* probably unnecessary */

	*class = ata_dev_try_classify(ap, 0, NULL);

	DPRINTK("EXIT, class=%u\n", *class);
	return 0;
}

/**
 *	ata_std_postreset - standard postreset callback
 *	@ap: the target ata_port
 *	@classes: classes of attached devices
 *
 *	This function is invoked after a successful reset.  Note that
 *	the device might have been reset more than once using
 *	different reset methods before postreset is invoked.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_std_postreset(struct ata_port *ap, unsigned int *classes)
{
	u32 serror;

	DPRINTK("ENTER\n");

	/* print link status */
	sata_print_link_status(ap);

	/* clear SError */
	if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
		sata_scr_write(ap, SCR_ERROR, serror);

	/* re-enable interrupts */
	if (!ap->ops->error_handler)
		ap->ops->irq_on(ap);

	/* is double-select really necessary? */
	if (classes[0] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (classes[1] != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* bail out if no device is present */
	if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
		DPRINTK("EXIT, no device\n");
		return;
	}

	/* set up device control */
	if (ap->ioaddr.ctl_addr)
		iowrite8(ap->ctl, ap->ioaddr.ctl_addr);

	DPRINTK("EXIT\n");
}
3592
623a3128
TH
3593/**
3594 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3595 * @dev: device to compare against
3596 * @new_class: class of the new device
3597 * @new_id: IDENTIFY page of the new device
3598 *
3599 * Compare @new_class and @new_id against @dev and determine
3600 * whether @dev is the device indicated by @new_class and
3601 * @new_id.
3602 *
3603 * LOCKING:
3604 * None.
3605 *
3606 * RETURNS:
3607 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3608 */
3373efd8
TH
3609static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3610 const u16 *new_id)
623a3128
TH
3611{
3612 const u16 *old_id = dev->id;
a0cf733b
TH
3613 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3614 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3615 u64 new_n_sectors;
3616
3617 if (dev->class != new_class) {
f15a1daf
TH
3618 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3619 dev->class, new_class);
623a3128
TH
3620 return 0;
3621 }
3622
a0cf733b
TH
3623 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3624 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3625 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3626 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3627 new_n_sectors = ata_id_n_sectors(new_id);
3628
3629 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3630 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3631 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3632 return 0;
3633 }
3634
3635 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3636 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3637 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3638 return 0;
3639 }
3640
3641 if (dev->class == ATA_DEV_ATA && dev->n_sectors != new_n_sectors) {
f15a1daf
TH
3642 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3643 "%llu != %llu\n",
3644 (unsigned long long)dev->n_sectors,
3645 (unsigned long long)new_n_sectors);
1e999736
AC
3646	/* Is this the boot-time size? If so, this appears to be the
3647	   same disk and its HPA has simply been reapplied. */
3648 if (ata_ignore_hpa && dev->n_sectors_boot == new_n_sectors
3649 && ata_id_hpa_enabled(new_id))
3650 return 1;
623a3128
TH
3651 return 0;
3652 }
3653
3654 return 1;
3655}
3656
3657/**
3658 * ata_dev_revalidate - Revalidate ATA device
623a3128 3659 * @dev: device to revalidate
bff04647 3660 * @readid_flags: read ID flags
623a3128
TH
3661 *
3662 * Re-read IDENTIFY page and make sure @dev is still attached to
3663 * the port.
3664 *
3665 * LOCKING:
3666 * Kernel thread context (may sleep)
3667 *
3668 * RETURNS:
3669 * 0 on success, negative errno otherwise
3670 */
bff04647 3671int ata_dev_revalidate(struct ata_device *dev, unsigned int readid_flags)
623a3128 3672{
5eb45c02 3673 unsigned int class = dev->class;
f15a1daf 3674 u16 *id = (void *)dev->ap->sector_buf;
623a3128
TH
3675 int rc;
3676
5eb45c02
TH
3677 if (!ata_dev_enabled(dev)) {
3678 rc = -ENODEV;
3679 goto fail;
3680 }
623a3128 3681
fe635c7e 3682 /* read ID data */
bff04647 3683 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128
TH
3684 if (rc)
3685 goto fail;
3686
3687 /* is the device still there? */
3373efd8 3688 if (!ata_dev_same_device(dev, class, id)) {
623a3128
TH
3689 rc = -ENODEV;
3690 goto fail;
3691 }
3692
fe635c7e 3693 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
623a3128
TH
3694
3695 /* configure device according to the new ID */
efdaedc4 3696 rc = ata_dev_configure(dev);
5eb45c02
TH
3697 if (rc == 0)
3698 return 0;
623a3128
TH
3699
3700 fail:
f15a1daf 3701 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3702 return rc;
3703}
3704
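A hedged usage sketch (the wrapper below is invented; ATA_READID_POSTRESET is the readid_flags bit passed down to ata_dev_read_id() when the read follows a reset):

/* Hypothetical caller of ata_dev_revalidate(); only the function and
 * flag names documented above are real, the wrapper is illustrative.
 */
static int my_recheck_device(struct ata_device *dev, int after_reset)
{
	unsigned int flags = after_reset ? ATA_READID_POSTRESET : 0;
	int rc = ata_dev_revalidate(dev, flags);

	if (rc)		/* device changed or vanished; EH should re-probe */
		ata_dev_printk(dev, KERN_WARNING,
			       "revalidation says device changed (%d)\n", rc);
	return rc;
}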
6919a0a6
AC
3705struct ata_blacklist_entry {
3706 const char *model_num;
3707 const char *model_rev;
3708 unsigned long horkage;
3709};
3710
3711static const struct ata_blacklist_entry ata_device_blacklist [] = {
3712 /* Devices with DMA related problems under Linux */
3713 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3714 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3715 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3716 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3717 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3718 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3719 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3720 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3721 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3722 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3723 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3724 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3725 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3726 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3727 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3728 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3729 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3730 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3731 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3732 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3733 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3734 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3735 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3736 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3737 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3738 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3739 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3740 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3741 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
3742
18d6e9d5 3743 /* Weird ATAPI devices */
6f23a31d
AL
3744 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 |
3745 ATA_HORKAGE_DMA_RW_ONLY },
18d6e9d5 3746
6919a0a6
AC
3747 /* Devices we expect to fail diagnostics */
3748
3749 /* Devices where NCQ should be avoided */
3750 /* NCQ is slow */
3751 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3752 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3753 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30
PR
3754 /* NCQ is broken */
3755 { "Maxtor 6L250S0", "BANC1G10", ATA_HORKAGE_NONCQ },
96442925
JA
3756 /* NCQ hard hangs device under heavier load, needs hard power cycle */
3757 { "Maxtor 6B250S0", "BANC1B70", ATA_HORKAGE_NONCQ },
36e337d0
RH
3758 /* Blacklist entries taken from Silicon Image 3124/3132
3759 Windows driver .inf file - also several Linux problem reports */
3760 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3761 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3762 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6
AC
3763
3764 /* Devices with NCQ limits */
3765
3766 /* End Marker */
3767 { }
1da177e4 3768};
2e9edbf8 3769
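Matching is by exact model string; a NULL model_rev acts as a wildcard over firmware revisions, while a revision string pins the horkage to that exact firmware. A hypothetical pair of entries (model strings invented) showing both forms; real additions go just before the end marker above:

/* Illustrative only -- these model strings do not exist. */
static const struct ata_blacklist_entry example_blacklist [] = {
	{ "EXAMPLE DISK 1234",	NULL,		ATA_HORKAGE_NONCQ },	/* any firmware */
	{ "EXAMPLE DISK 5678",	"FW1.00",	ATA_HORKAGE_NODMA },	/* one firmware only */

	/* End Marker */
	{ }
};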
6919a0a6 3770unsigned long ata_device_blacklisted(const struct ata_device *dev)
1da177e4 3771{
8bfa79fc
TH
3772 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3773 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3774 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3775
8bfa79fc
TH
3776 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3777 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3778
6919a0a6 3779 while (ad->model_num) {
8bfa79fc 3780 if (!strcmp(ad->model_num, model_num)) {
6919a0a6
AC
3781 if (ad->model_rev == NULL)
3782 return ad->horkage;
8bfa79fc 3783 if (!strcmp(ad->model_rev, model_rev))
6919a0a6 3784 return ad->horkage;
f4b15fef 3785 }
6919a0a6 3786 ad++;
f4b15fef 3787 }
1da177e4
LT
3788 return 0;
3789}
3790
6919a0a6
AC
3791static int ata_dma_blacklisted(const struct ata_device *dev)
3792{
3793 /* We don't support polling DMA.
3794	 * Blacklist DMA for ATAPI devices with CDB-intr (forcing PIO)
3795	 * if the LLDD handles interrupts only in the HSM_ST_LAST state.
3796 */
3797 if ((dev->ap->flags & ATA_FLAG_PIO_POLLING) &&
3798 (dev->flags & ATA_DFLAG_CDB_INTR))
3799 return 1;
3800 return (ata_device_blacklisted(dev) & ATA_HORKAGE_NODMA) ? 1 : 0;
3801}
3802
a6d5a51c
TH
3803/**
3804 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3805 * @dev: Device to compute xfermask for
3806 *
acf356b1
TH
3807 * Compute supported xfermask of @dev and store it in
3808 * dev->*_mask. This function is responsible for applying all
3809 * known limits including host controller limits, device
3810 * blacklist, etc...
a6d5a51c
TH
3811 *
3812 * LOCKING:
3813 * None.
a6d5a51c 3814 */
3373efd8 3815static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 3816{
3373efd8 3817 struct ata_port *ap = dev->ap;
cca3974e 3818 struct ata_host *host = ap->host;
a6d5a51c 3819 unsigned long xfer_mask;
1da177e4 3820
37deecb5 3821 /* controller modes available */
565083e1
TH
3822 xfer_mask = ata_pack_xfermask(ap->pio_mask,
3823 ap->mwdma_mask, ap->udma_mask);
3824
8343f889 3825 /* drive modes available */
37deecb5
TH
3826 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
3827 dev->mwdma_mask, dev->udma_mask);
3828 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 3829
b352e57d
AC
3830 /*
3831 * CFA Advanced TrueIDE timings are not allowed on a shared
3832 * cable
3833 */
3834 if (ata_dev_pair(dev)) {
3835 /* No PIO5 or PIO6 */
3836 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
3837 /* No MWDMA3 or MWDMA 4 */
3838 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
3839 }
3840
37deecb5
TH
3841 if (ata_dma_blacklisted(dev)) {
3842 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
3843 ata_dev_printk(dev, KERN_WARNING,
3844 "device is on DMA blacklist, disabling DMA\n");
37deecb5 3845 }
a6d5a51c 3846
14d66ab7
PV
3847 if ((host->flags & ATA_HOST_SIMPLEX) &&
3848 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
3849 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
3850 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
3851 "other device, disabling DMA\n");
5444a6f4 3852 }
565083e1 3853
e424675f
JG
3854 if (ap->flags & ATA_FLAG_NO_IORDY)
3855 xfer_mask &= ata_pio_mask_no_iordy(dev);
3856
5444a6f4 3857 if (ap->ops->mode_filter)
a76b62ca 3858 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 3859
8343f889
RH
3860	/* Apply the cable rule here. Don't apply it early, because the
3861	 * cable type can itself change when we handle hotplug.
3862	 * Check this last so that we know whether the transfer rate was
3863	 * limited solely by the cable.
3864	 * Cables reported host-side as unknown or 80-wire are checked
3865	 * drive-side as well. Cases where a 40-wire cable is known to
3866	 * be safely usable at 80-wire speeds are not checked here.
3867 */
3868 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
3869 /* UDMA/44 or higher would be available */
3870	if ((ap->cbl == ATA_CBL_PATA40) ||
3871 (ata_drive_40wire(dev->id) &&
3872 (ap->cbl == ATA_CBL_PATA_UNK ||
3873 ap->cbl == ATA_CBL_PATA80))) {
3874 ata_dev_printk(dev, KERN_WARNING,
3875 "limited to UDMA/33 due to 40-wire cable\n");
3876 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
3877 }
3878
565083e1
TH
3879 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
3880 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
3881}
3882
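The mask handling above is plain bit intersection over packed PIO/MWDMA/UDMA fields. A standalone rehearsal of the idea (the shift values and masks below are placeholders, not the kernel's ATA_SHIFT_* constants):

#include <stdio.h>

#define SHIFT_PIO	0	/* placeholder layout, not ATA_SHIFT_PIO */
#define SHIFT_MWDMA	8
#define SHIFT_UDMA	16

static unsigned long pack(unsigned pio, unsigned mwdma, unsigned udma)
{
	/* same spirit as ata_pack_xfermask() */
	return ((unsigned long)pio << SHIFT_PIO) |
	       ((unsigned long)mwdma << SHIFT_MWDMA) |
	       ((unsigned long)udma << SHIFT_UDMA);
}

int main(void)
{
	unsigned long host = pack(0x1f, 0x07, 0x7f);	/* invented: up to UDMA6 */
	unsigned long dev  = pack(0x1f, 0x07, 0x3f);	/* invented: up to UDMA5 */
	unsigned long xfer = host & dev;		/* intersect, as above */

	xfer &= ~(0xF8UL << SHIFT_UDMA);	/* 40-wire rule: drop UDMA/44+ */

	printf("udma bits after limits: %#lx\n", (xfer >> SHIFT_UDMA) & 0xff);
	return 0;	/* prints 0x7, i.e. UDMA/33 and below */
}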
1da177e4
LT
3883/**
3884 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
3885 * @dev: Device to which command will be sent
3886 *
780a87f7
JG
3887 * Issue SET FEATURES - XFER MODE command to device @dev
3888 * on port @ap.
3889 *
1da177e4 3890 * LOCKING:
0cba632b 3891 * PCI/etc. bus probe sem.
83206a29
TH
3892 *
3893 * RETURNS:
3894 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
3895 */
3896
3373efd8 3897static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 3898{
a0123703 3899 struct ata_taskfile tf;
83206a29 3900 unsigned int err_mask;
1da177e4
LT
3901
3902 /* set up set-features taskfile */
3903 DPRINTK("set features - xfer mode\n");
3904
3373efd8 3905 ata_tf_init(dev, &tf);
a0123703
TH
3906 tf.command = ATA_CMD_SET_FEATURES;
3907 tf.feature = SETFEATURES_XFER;
3908 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3909 tf.protocol = ATA_PROT_NODATA;
3910 tf.nsect = dev->xfer_mode;
1da177e4 3911
3373efd8 3912 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 3913
83206a29
TH
3914 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3915 return err_mask;
1da177e4
LT
3916}
3917
8bf62ece
AL
3918/**
3919 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 3920 * @dev: Device to which command will be sent
e2a7f77a
RD
3921 * @heads: Number of heads (taskfile parameter)
3922 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
3923 *
3924 * LOCKING:
6aff8f1f
TH
3925 * Kernel thread context (may sleep)
3926 *
3927 * RETURNS:
3928 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 3929 */
3373efd8
TH
3930static unsigned int ata_dev_init_params(struct ata_device *dev,
3931 u16 heads, u16 sectors)
8bf62ece 3932{
a0123703 3933 struct ata_taskfile tf;
6aff8f1f 3934 unsigned int err_mask;
8bf62ece
AL
3935
3936 /* Number of sectors per track 1-255. Number of heads 1-16 */
3937 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 3938 return AC_ERR_INVALID;
8bf62ece
AL
3939
3940 /* set up init dev params taskfile */
3941	DPRINTK("init dev params\n");
3942
3373efd8 3943 ata_tf_init(dev, &tf);
a0123703
TH
3944 tf.command = ATA_CMD_INIT_DEV_PARAMS;
3945 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
3946 tf.protocol = ATA_PROT_NODATA;
3947 tf.nsect = sectors;
3948 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 3949
3373efd8 3950 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
8bf62ece 3951
6aff8f1f
TH
3952 DPRINTK("EXIT, err_mask=%x\n", err_mask);
3953 return err_mask;
8bf62ece
AL
3954}
3955
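To make the taskfile encoding concrete, a standalone check of the arithmetic used above (geometry values invented): sectors-per-track goes in the sector count register and (heads - 1) in the low nibble of the device register.

#include <stdio.h>

int main(void)
{
	unsigned int heads = 16, sectors = 63;	/* invented legal geometry */

	/* same encoding as ata_dev_init_params() */
	unsigned int nsect  = sectors;			/* -> 0x3f */
	unsigned int device = (heads - 1) & 0x0f;	/* max head -> 0x0f */

	printf("tf.nsect=%#x, device register low nibble=%#x\n",
	       nsect, device);
	return 0;
}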
1da177e4 3956/**
0cba632b
JG
3957 * ata_sg_clean - Unmap DMA memory associated with command
3958 * @qc: Command containing DMA memory to be released
3959 *
3960 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
3961 *
3962 * LOCKING:
cca3974e 3963 * spin_lock_irqsave(host lock)
1da177e4 3964 */
70e6ad0c 3965void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
3966{
3967 struct ata_port *ap = qc->ap;
cedc9a47 3968 struct scatterlist *sg = qc->__sg;
1da177e4 3969 int dir = qc->dma_dir;
cedc9a47 3970 void *pad_buf = NULL;
1da177e4 3971
a4631474
TH
3972 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
3973 WARN_ON(sg == NULL);
1da177e4
LT
3974
3975 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 3976 WARN_ON(qc->n_elem > 1);
1da177e4 3977
2c13b7ce 3978 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 3979
cedc9a47
JG
3980 /* if we padded the buffer out to 32-bit bound, and data
3981 * xfer direction is from-device, we must copy from the
3982 * pad buffer back into the supplied buffer
3983 */
3984 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
3985 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
3986
3987 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 3988 if (qc->n_elem)
2f1f610b 3989 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47
JG
3990 /* restore last sg */
3991 sg[qc->orig_n_elem - 1].length += qc->pad_len;
3992 if (pad_buf) {
3993 struct scatterlist *psg = &qc->pad_sgent;
3994 void *addr = kmap_atomic(psg->page, KM_IRQ0);
3995 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 3996 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
3997 }
3998 } else {
2e242fa9 3999 if (qc->n_elem)
2f1f610b 4000 dma_unmap_single(ap->dev,
e1410f2d
JG
4001 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4002 dir);
cedc9a47
JG
4003 /* restore sg */
4004 sg->length += qc->pad_len;
4005 if (pad_buf)
4006 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4007 pad_buf, qc->pad_len);
4008 }
1da177e4
LT
4009
4010 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4011 qc->__sg = NULL;
1da177e4
LT
4012}
4013
4014/**
4015 * ata_fill_sg - Fill PCI IDE PRD table
4016 * @qc: Metadata associated with taskfile to be transferred
4017 *
780a87f7
JG
4018 * Fill PCI IDE PRD (scatter-gather) table with segments
4019 * associated with the current disk command.
4020 *
1da177e4 4021 * LOCKING:
cca3974e 4022 * spin_lock_irqsave(host lock)
1da177e4
LT
4023 *
4024 */
4025static void ata_fill_sg(struct ata_queued_cmd *qc)
4026{
1da177e4 4027 struct ata_port *ap = qc->ap;
cedc9a47
JG
4028 struct scatterlist *sg;
4029 unsigned int idx;
1da177e4 4030
a4631474 4031 WARN_ON(qc->__sg == NULL);
f131883e 4032 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4033
4034 idx = 0;
cedc9a47 4035 ata_for_each_sg(sg, qc) {
1da177e4
LT
4036 u32 addr, offset;
4037 u32 sg_len, len;
4038
4039 /* determine if physical DMA addr spans 64K boundary.
4040 * Note h/w doesn't support 64-bit, so we unconditionally
4041 * truncate dma_addr_t to u32.
4042 */
4043 addr = (u32) sg_dma_address(sg);
4044 sg_len = sg_dma_len(sg);
4045
4046 while (sg_len) {
4047 offset = addr & 0xffff;
4048 len = sg_len;
4049 if ((offset + sg_len) > 0x10000)
4050 len = 0x10000 - offset;
4051
4052 ap->prd[idx].addr = cpu_to_le32(addr);
4053 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4054 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4055
4056 idx++;
4057 sg_len -= len;
4058 addr += len;
4059 }
4060 }
4061
4062 if (idx)
4063 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4064}
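A standalone rehearsal of the 64K boundary split performed by the loop above (segment address and length invented): a segment starting 16 bytes below a 64K line is emitted as two PRD entries.

#include <stdio.h>

int main(void)
{
	unsigned int addr = 0x0000fff0, sg_len = 64;	/* invented segment */

	while (sg_len) {
		unsigned int offset = addr & 0xffff;
		unsigned int len = sg_len;

		if (offset + sg_len > 0x10000)	/* same test as ata_fill_sg() */
			len = 0x10000 - offset;

		printf("PRD entry: addr=%#x len=%u\n", addr, len);
		sg_len -= len;
		addr += len;
	}
	return 0;	/* prints (0xfff0, 16) then (0x10000, 48) */
}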
4065/**
4066 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4067 * @qc: Metadata associated with taskfile to check
4068 *
780a87f7
JG
4069 * Allow low-level driver to filter ATA PACKET commands, returning
4070 * a status indicating whether or not it is OK to use DMA for the
4071 * supplied PACKET command.
4072 *
1da177e4 4073 * LOCKING:
cca3974e 4074 * spin_lock_irqsave(host lock)
0cba632b 4075 *
1da177e4
LT
4076 * RETURNS: 0 when ATAPI DMA can be used
4077 * nonzero otherwise
4078 */
4079int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4080{
4081 struct ata_port *ap = qc->ap;
4082 int rc = 0; /* Assume ATAPI DMA is OK by default */
4083
6f23a31d
AL
4084 /* some drives can only do ATAPI DMA on read/write */
4085 if (unlikely(qc->dev->horkage & ATA_HORKAGE_DMA_RW_ONLY)) {
4086 struct scsi_cmnd *cmd = qc->scsicmd;
4087 u8 *scsicmd = cmd->cmnd;
4088
4089 switch (scsicmd[0]) {
4090 case READ_10:
4091 case WRITE_10:
4092 case READ_12:
4093 case WRITE_12:
4094 case READ_6:
4095 case WRITE_6:
4096			/* atapi dma may be ok */
4097 break;
4098 default:
4099 /* turn off atapi dma */
4100 return 1;
4101 }
4102 }
4103
1da177e4
LT
4104 if (ap->ops->check_atapi_dma)
4105 rc = ap->ops->check_atapi_dma(qc);
4106
4107 return rc;
4108}
4109/**
4110 * ata_qc_prep - Prepare taskfile for submission
4111 * @qc: Metadata associated with taskfile to be prepared
4112 *
780a87f7
JG
4113 * Prepare ATA taskfile for submission.
4114 *
1da177e4 4115 * LOCKING:
cca3974e 4116 * spin_lock_irqsave(host lock)
1da177e4
LT
4117 */
4118void ata_qc_prep(struct ata_queued_cmd *qc)
4119{
4120 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4121 return;
4122
4123 ata_fill_sg(qc);
4124}
4125
e46834cd
BK
4126void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4127
0cba632b
JG
4128/**
4129 * ata_sg_init_one - Associate command with memory buffer
4130 * @qc: Command to be associated
4131 * @buf: Memory buffer
4132 * @buflen: Length of memory buffer, in bytes.
4133 *
4134 * Initialize the data-related elements of queued_cmd @qc
4135 * to point to a single memory buffer, @buf of byte length @buflen.
4136 *
4137 * LOCKING:
cca3974e 4138 * spin_lock_irqsave(host lock)
0cba632b
JG
4139 */
4140
1da177e4
LT
4141void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4142{
1da177e4
LT
4143 qc->flags |= ATA_QCFLAG_SINGLE;
4144
cedc9a47 4145 qc->__sg = &qc->sgent;
1da177e4 4146 qc->n_elem = 1;
cedc9a47 4147 qc->orig_n_elem = 1;
1da177e4 4148 qc->buf_virt = buf;
233277ca 4149 qc->nbytes = buflen;
1da177e4 4150
61c0596c 4151 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4152}
4153
0cba632b
JG
4154/**
4155 * ata_sg_init - Associate command with scatter-gather table.
4156 * @qc: Command to be associated
4157 * @sg: Scatter-gather table.
4158 * @n_elem: Number of elements in s/g table.
4159 *
4160 * Initialize the data-related elements of queued_cmd @qc
4161 * to point to a scatter-gather table @sg, containing @n_elem
4162 * elements.
4163 *
4164 * LOCKING:
cca3974e 4165 * spin_lock_irqsave(host lock)
0cba632b
JG
4166 */
4167
1da177e4
LT
4168void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4169 unsigned int n_elem)
4170{
4171 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4172 qc->__sg = sg;
1da177e4 4173 qc->n_elem = n_elem;
cedc9a47 4174 qc->orig_n_elem = n_elem;
1da177e4
LT
4175}
4176
4177/**
0cba632b
JG
4178 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4179 * @qc: Command with memory buffer to be mapped.
4180 *
4181 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4182 *
4183 * LOCKING:
cca3974e 4184 * spin_lock_irqsave(host lock)
1da177e4
LT
4185 *
4186 * RETURNS:
0cba632b 4187 * Zero on success, negative on error.
1da177e4
LT
4188 */
4189
4190static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4191{
4192 struct ata_port *ap = qc->ap;
4193 int dir = qc->dma_dir;
cedc9a47 4194 struct scatterlist *sg = qc->__sg;
1da177e4 4195 dma_addr_t dma_address;
2e242fa9 4196 int trim_sg = 0;
1da177e4 4197
cedc9a47
JG
4198 /* we must lengthen transfers to end on a 32-bit boundary */
4199 qc->pad_len = sg->length & 3;
4200 if (qc->pad_len) {
4201 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4202 struct scatterlist *psg = &qc->pad_sgent;
4203
a4631474 4204 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4205
4206 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4207
4208 if (qc->tf.flags & ATA_TFLAG_WRITE)
4209 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4210 qc->pad_len);
4211
4212 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4213 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4214 /* trim sg */
4215 sg->length -= qc->pad_len;
2e242fa9
TH
4216 if (sg->length == 0)
4217 trim_sg = 1;
cedc9a47
JG
4218
4219 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4220 sg->length, qc->pad_len);
4221 }
4222
2e242fa9
TH
4223 if (trim_sg) {
4224 qc->n_elem--;
e1410f2d
JG
4225 goto skip_map;
4226 }
4227
2f1f610b 4228 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4229 sg->length, dir);
537a95d9
TH
4230 if (dma_mapping_error(dma_address)) {
4231 /* restore sg */
4232 sg->length += qc->pad_len;
1da177e4 4233 return -1;
537a95d9 4234 }
1da177e4
LT
4235
4236 sg_dma_address(sg) = dma_address;
32529e01 4237 sg_dma_len(sg) = sg->length;
1da177e4 4238
2e242fa9 4239skip_map:
1da177e4
LT
4240 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4241 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4242
4243 return 0;
4244}
4245
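For instance (length invented): a 1022-byte ATAPI buffer has pad_len = 1022 & 3 = 2, so the sg entry is trimmed to 1020 bytes and the last 2 bytes travel through the 4-byte pad buffer, keeping the transfer 32-bit aligned. A standalone check:

#include <stdio.h>

int main(void)
{
	unsigned int buflen = 1022;		/* invented transfer size */
	unsigned int pad_len = buflen & 3;	/* same test as above */

	printf("sg length trimmed to %u, %u byte(s) via pad buffer\n",
	       buflen - pad_len, pad_len);
	return 0;
}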
4246/**
0cba632b
JG
4247 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4248 * @qc: Command with scatter-gather table to be mapped.
4249 *
4250 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4251 *
4252 * LOCKING:
cca3974e 4253 * spin_lock_irqsave(host lock)
1da177e4
LT
4254 *
4255 * RETURNS:
0cba632b 4256 * Zero on success, negative on error.
1da177e4
LT
4257 *
4258 */
4259
4260static int ata_sg_setup(struct ata_queued_cmd *qc)
4261{
4262 struct ata_port *ap = qc->ap;
cedc9a47
JG
4263 struct scatterlist *sg = qc->__sg;
4264 struct scatterlist *lsg = &sg[qc->n_elem - 1];
e1410f2d 4265 int n_elem, pre_n_elem, dir, trim_sg = 0;
1da177e4 4266
44877b4e 4267 VPRINTK("ENTER, ata%u\n", ap->print_id);
a4631474 4268 WARN_ON(!(qc->flags & ATA_QCFLAG_SG));
1da177e4 4269
cedc9a47
JG
4270 /* we must lengthen transfers to end on a 32-bit boundary */
4271 qc->pad_len = lsg->length & 3;
4272 if (qc->pad_len) {
4273 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4274 struct scatterlist *psg = &qc->pad_sgent;
4275 unsigned int offset;
4276
a4631474 4277 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4278
4279 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4280
4281 /*
4282 * psg->page/offset are used to copy to-be-written
4283 * data in this function or read data in ata_sg_clean.
4284 */
4285 offset = lsg->offset + lsg->length - qc->pad_len;
4286 psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
4287 psg->offset = offset_in_page(offset);
4288
4289 if (qc->tf.flags & ATA_TFLAG_WRITE) {
4290 void *addr = kmap_atomic(psg->page, KM_IRQ0);
4291 memcpy(pad_buf, addr + psg->offset, qc->pad_len);
dfa15988 4292 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4293 }
4294
4295 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4296 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4297 /* trim last sg */
4298 lsg->length -= qc->pad_len;
e1410f2d
JG
4299 if (lsg->length == 0)
4300 trim_sg = 1;
cedc9a47
JG
4301
4302 DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
4303 qc->n_elem - 1, lsg->length, qc->pad_len);
4304 }
4305
e1410f2d
JG
4306 pre_n_elem = qc->n_elem;
4307 if (trim_sg && pre_n_elem)
4308 pre_n_elem--;
4309
4310 if (!pre_n_elem) {
4311 n_elem = 0;
4312 goto skip_map;
4313 }
4314
1da177e4 4315 dir = qc->dma_dir;
2f1f610b 4316 n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
537a95d9
TH
4317 if (n_elem < 1) {
4318 /* restore last sg */
4319 lsg->length += qc->pad_len;
1da177e4 4320 return -1;
537a95d9 4321 }
1da177e4
LT
4322
4323 DPRINTK("%d sg elements mapped\n", n_elem);
4324
e1410f2d 4325skip_map:
1da177e4
LT
4326 qc->n_elem = n_elem;
4327
4328 return 0;
4329}
4330
0baab86b 4331/**
c893a3ae 4332 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4333 * @buf: Buffer to swap
4334 * @buf_words: Number of 16-bit words in buffer.
4335 *
4336 * Swap halves of 16-bit words if needed to convert from
4337 * little-endian byte order to native cpu byte order, or
4338 * vice-versa.
4339 *
4340 * LOCKING:
6f0ef4fa 4341 * Inherited from caller.
0baab86b 4342 */
1da177e4
LT
4343void swap_buf_le16(u16 *buf, unsigned int buf_words)
4344{
4345#ifdef __BIG_ENDIAN
4346 unsigned int i;
4347
4348 for (i = 0; i < buf_words; i++)
4349 buf[i] = le16_to_cpu(buf[i]);
4350#endif /* __BIG_ENDIAN */
4351}
4352
6ae4cfb5 4353/**
0d5ff566 4354 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4355 * @adev: device to target
6ae4cfb5
AL
4356 * @buf: data buffer
4357 * @buflen: buffer length
344babaa 4358 * @write_data: read/write
6ae4cfb5
AL
4359 *
4360 * Transfer data from/to the device data register by PIO.
4361 *
4362 * LOCKING:
4363 * Inherited from caller.
6ae4cfb5 4364 */
0d5ff566
TH
4365void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4366 unsigned int buflen, int write_data)
1da177e4 4367{
a6b2c5d4 4368 struct ata_port *ap = adev->ap;
6ae4cfb5 4369 unsigned int words = buflen >> 1;
1da177e4 4370
6ae4cfb5 4371 /* Transfer multiple of 2 bytes */
1da177e4 4372 if (write_data)
0d5ff566 4373 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4374 else
0d5ff566 4375 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4376
4377 /* Transfer trailing 1 byte, if any. */
4378 if (unlikely(buflen & 0x01)) {
4379 u16 align_buf[1] = { 0 };
4380 unsigned char *trailing_buf = buf + buflen - 1;
4381
4382 if (write_data) {
4383 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4384 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4385 } else {
0d5ff566 4386 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4387 memcpy(trailing_buf, align_buf, 1);
4388 }
4389 }
1da177e4
LT
4390}
4391
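So an odd-length buffer moves as whole 16-bit words plus one byte bounced through align_buf; for example (length invented) a 7-byte buffer is three words plus a trailing byte. A standalone check of the split:

#include <stdio.h>

int main(void)
{
	unsigned int buflen = 7;		/* invented odd length */
	unsigned int words = buflen >> 1;	/* as in ata_data_xfer() */

	printf("%u word(s), trailing byte: %s\n",
	       words, (buflen & 0x01) ? "yes" : "no");
	return 0;
}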
75e99585 4392/**
0d5ff566 4393 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
4394 * @adev: device to target
4395 * @buf: data buffer
4396 * @buflen: buffer length
4397 * @write_data: read/write
4398 *
88574551 4399 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4400 * transfer with interrupts disabled.
4401 *
4402 * LOCKING:
4403 * Inherited from caller.
4404 */
0d5ff566
TH
4405void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
4406 unsigned int buflen, int write_data)
75e99585
AC
4407{
4408 unsigned long flags;
4409 local_irq_save(flags);
0d5ff566 4410 ata_data_xfer(adev, buf, buflen, write_data);
75e99585
AC
4411 local_irq_restore(flags);
4412}
4413
4414
6ae4cfb5 4415/**
5a5dbd18 4416 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4417 * @qc: Command in progress
4418 *
5a5dbd18 4419 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4420 *
4421 * LOCKING:
4422 * Inherited from caller.
4423 */
4424
1da177e4
LT
4425static void ata_pio_sector(struct ata_queued_cmd *qc)
4426{
4427 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4428 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4429 struct ata_port *ap = qc->ap;
4430 struct page *page;
4431 unsigned int offset;
4432 unsigned char *buf;
4433
5a5dbd18 4434 if (qc->curbytes == qc->nbytes - qc->sect_size)
14be71f4 4435 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4436
4437 page = sg[qc->cursg].page;
726f0785 4438 offset = sg[qc->cursg].offset + qc->cursg_ofs;
1da177e4
LT
4439
4440 /* get the current page and offset */
4441 page = nth_page(page, (offset >> PAGE_SHIFT));
4442 offset %= PAGE_SIZE;
4443
1da177e4
LT
4444 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4445
91b8b313
AL
4446 if (PageHighMem(page)) {
4447 unsigned long flags;
4448
a6b2c5d4 4449 /* FIXME: use a bounce buffer */
91b8b313
AL
4450 local_irq_save(flags);
4451 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4452
91b8b313 4453 /* do the actual data transfer */
5a5dbd18 4454 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
1da177e4 4455
91b8b313
AL
4456 kunmap_atomic(buf, KM_IRQ0);
4457 local_irq_restore(flags);
4458 } else {
4459 buf = page_address(page);
5a5dbd18 4460 ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
91b8b313 4461 }
1da177e4 4462
5a5dbd18
ML
4463 qc->curbytes += qc->sect_size;
4464 qc->cursg_ofs += qc->sect_size;
1da177e4 4465
726f0785 4466 if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
1da177e4
LT
4467 qc->cursg++;
4468 qc->cursg_ofs = 0;
4469 }
1da177e4 4470}
1da177e4 4471
07f6f7d0 4472/**
5a5dbd18 4473 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4474 * @qc: Command in progress
4475 *
5a5dbd18 4476 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4477 * ATA device for the DRQ request.
4478 *
4479 * LOCKING:
4480 * Inherited from caller.
4481 */
1da177e4 4482
07f6f7d0
AL
4483static void ata_pio_sectors(struct ata_queued_cmd *qc)
4484{
4485 if (is_multi_taskfile(&qc->tf)) {
4486 /* READ/WRITE MULTIPLE */
4487 unsigned int nsect;
4488
587005de 4489 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4490
5a5dbd18 4491 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4492 qc->dev->multi_count);
07f6f7d0
AL
4493 while (nsect--)
4494 ata_pio_sector(qc);
4495 } else
4496 ata_pio_sector(qc);
4497}
4498
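Worked example of the group size computation above (all numbers invented): with 512-byte sectors, multi_count 8, and 512 bytes left of a 16 KiB command, the final DRQ block moves min(512/512, 8) = 1 sector. Standalone check:

#include <stdio.h>

static unsigned int min_u(unsigned int a, unsigned int b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned int nbytes = 16384, curbytes = 15872;	/* invented */
	unsigned int sect_size = 512, multi_count = 8;

	/* same computation as ata_pio_sectors() */
	unsigned int nsect = min_u((nbytes - curbytes) / sect_size,
				   multi_count);

	printf("transfer %u sector(s) in this DRQ block\n", nsect);
	return 0;	/* prints 1 */
}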
c71c1857
AL
4499/**
4500 * atapi_send_cdb - Write CDB bytes to hardware
4501 * @ap: Port to which ATAPI device is attached.
4502 * @qc: Taskfile currently active
4503 *
4504 * When device has indicated its readiness to accept
4505 * a CDB, this function is called. Send the CDB.
4506 *
4507 * LOCKING:
4508 * caller.
4509 */
4510
4511static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4512{
4513 /* send SCSI cdb */
4514 DPRINTK("send cdb\n");
db024d53 4515 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4516
a6b2c5d4 4517 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4518 ata_altstatus(ap); /* flush */
4519
4520 switch (qc->tf.protocol) {
4521 case ATA_PROT_ATAPI:
4522 ap->hsm_task_state = HSM_ST;
4523 break;
4524 case ATA_PROT_ATAPI_NODATA:
4525 ap->hsm_task_state = HSM_ST_LAST;
4526 break;
4527 case ATA_PROT_ATAPI_DMA:
4528 ap->hsm_task_state = HSM_ST_LAST;
4529 /* initiate bmdma */
4530 ap->ops->bmdma_start(qc);
4531 break;
4532 }
1da177e4
LT
4533}
4534
6ae4cfb5
AL
4535/**
4536 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4537 * @qc: Command in progress
4538 * @bytes: number of bytes
4539 *
4540 * Transfer data from/to the ATAPI device.
4541 *
4542 * LOCKING:
4543 * Inherited from caller.
4544 *
4545 */
4546
1da177e4
LT
4547static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
4548{
4549 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
cedc9a47 4550 struct scatterlist *sg = qc->__sg;
1da177e4
LT
4551 struct ata_port *ap = qc->ap;
4552 struct page *page;
4553 unsigned char *buf;
4554 unsigned int offset, count;
4555
563a6e1f 4556 if (qc->curbytes + bytes >= qc->nbytes)
14be71f4 4557 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
4558
4559next_sg:
563a6e1f 4560 if (unlikely(qc->cursg >= qc->n_elem)) {
7fb6ec28 4561 /*
563a6e1f
AL
4562 * The end of qc->sg is reached and the device expects
4563		 * more data to transfer. In order not to overrun qc->sg
4564		 * and to fulfill the length specified in the byte count register:
4565		 * - for reads, discard the trailing data from the device
4566		 * - for writes, pad with zero data out to the device
4567 */
4568 u16 pad_buf[1] = { 0 };
4569 unsigned int words = bytes >> 1;
4570 unsigned int i;
4571
4572 if (words) /* warning if bytes > 1 */
f15a1daf
TH
4573 ata_dev_printk(qc->dev, KERN_WARNING,
4574 "%u bytes trailing data\n", bytes);
563a6e1f
AL
4575
4576 for (i = 0; i < words; i++)
a6b2c5d4 4577 ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);
563a6e1f 4578
14be71f4 4579 ap->hsm_task_state = HSM_ST_LAST;
563a6e1f
AL
4580 return;
4581 }
4582
cedc9a47 4583 sg = &qc->__sg[qc->cursg];
1da177e4 4584
1da177e4
LT
4585 page = sg->page;
4586 offset = sg->offset + qc->cursg_ofs;
4587
4588 /* get the current page and offset */
4589 page = nth_page(page, (offset >> PAGE_SHIFT));
4590 offset %= PAGE_SIZE;
4591
6952df03 4592 /* don't overrun current sg */
32529e01 4593 count = min(sg->length - qc->cursg_ofs, bytes);
1da177e4
LT
4594
4595 /* don't cross page boundaries */
4596 count = min(count, (unsigned int)PAGE_SIZE - offset);
4597
7282aa4b
AL
4598 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4599
91b8b313
AL
4600 if (PageHighMem(page)) {
4601 unsigned long flags;
4602
a6b2c5d4 4603 /* FIXME: use bounce buffer */
91b8b313
AL
4604 local_irq_save(flags);
4605 buf = kmap_atomic(page, KM_IRQ0);
083958d3 4606
91b8b313 4607 /* do the actual data transfer */
a6b2c5d4 4608 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
7282aa4b 4609
91b8b313
AL
4610 kunmap_atomic(buf, KM_IRQ0);
4611 local_irq_restore(flags);
4612 } else {
4613 buf = page_address(page);
a6b2c5d4 4614 ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
91b8b313 4615 }
1da177e4
LT
4616
4617 bytes -= count;
4618 qc->curbytes += count;
4619 qc->cursg_ofs += count;
4620
32529e01 4621 if (qc->cursg_ofs == sg->length) {
1da177e4
LT
4622 qc->cursg++;
4623 qc->cursg_ofs = 0;
4624 }
4625
563a6e1f 4626 if (bytes)
1da177e4 4627 goto next_sg;
1da177e4
LT
4628}
4629
6ae4cfb5
AL
4630/**
4631 * atapi_pio_bytes - Transfer data from/to the ATAPI device.
4632 * @qc: Command in progress
4633 *
4634 * Transfer data from/to the ATAPI device.
4635 *
4636 * LOCKING:
4637 * Inherited from caller.
6ae4cfb5
AL
4638 */
4639
1da177e4
LT
4640static void atapi_pio_bytes(struct ata_queued_cmd *qc)
4641{
4642 struct ata_port *ap = qc->ap;
4643 struct ata_device *dev = qc->dev;
4644 unsigned int ireason, bc_lo, bc_hi, bytes;
4645 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
4646
eec4c3f3
AL
4647 /* Abuse qc->result_tf for temp storage of intermediate TF
4648 * here to save some kernel stack usage.
4649 * For normal completion, qc->result_tf is not relevant. For
4650 * error, qc->result_tf is later overwritten by ata_qc_complete().
4651 * So, the correctness of qc->result_tf is not affected.
4652 */
4653 ap->ops->tf_read(ap, &qc->result_tf);
4654 ireason = qc->result_tf.nsect;
4655 bc_lo = qc->result_tf.lbam;
4656 bc_hi = qc->result_tf.lbah;
1da177e4
LT
4657 bytes = (bc_hi << 8) | bc_lo;
4658
4659 /* shall be cleared to zero, indicating xfer of data */
4660 if (ireason & (1 << 0))
4661 goto err_out;
4662
4663 /* make sure transfer direction matches expected */
4664 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
4665 if (do_write != i_write)
4666 goto err_out;
4667
44877b4e 4668 VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);
312f7da2 4669
1da177e4
LT
4670 __atapi_pio_bytes(qc, bytes);
4671
4672 return;
4673
4674err_out:
f15a1daf 4675 ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
11a56d24 4676 qc->err_mask |= AC_ERR_HSM;
14be71f4 4677 ap->hsm_task_state = HSM_ST_ERR;
1da177e4
LT
4678}
4679
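The byte count is simply the LBA mid/high register pair; e.g. (register values invented) bc_lo = 0x00 and bc_hi = 0x02 request 512 bytes for this DRQ. Standalone check of the reassembly:

#include <stdio.h>

int main(void)
{
	unsigned int bc_lo = 0x00, bc_hi = 0x02;	/* invented values */
	unsigned int bytes = (bc_hi << 8) | bc_lo;	/* as in atapi_pio_bytes() */

	printf("device asks for %u bytes\n", bytes);	/* prints 512 */
	return 0;
}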
4680/**
c234fb00
AL
4681 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4682 * @ap: the target ata_port
4683 * @qc: qc in progress
1da177e4 4684 *
c234fb00
AL
4685 * RETURNS:
4686 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4687 */
c234fb00
AL
4688
4689static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4690{
c234fb00
AL
4691 if (qc->tf.flags & ATA_TFLAG_POLLING)
4692 return 1;
1da177e4 4693
c234fb00
AL
4694 if (ap->hsm_task_state == HSM_ST_FIRST) {
4695 if (qc->tf.protocol == ATA_PROT_PIO &&
4696 (qc->tf.flags & ATA_TFLAG_WRITE))
4697 return 1;
1da177e4 4698
c234fb00
AL
4699 if (is_atapi_taskfile(&qc->tf) &&
4700 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
4701 return 1;
fe79e683
AL
4702 }
4703
c234fb00
AL
4704 return 0;
4705}
1da177e4 4706
c17ea20d
TH
4707/**
4708 * ata_hsm_qc_complete - finish a qc running on standard HSM
4709 * @qc: Command to complete
4710 * @in_wq: 1 if called from workqueue, 0 otherwise
4711 *
4712 * Finish @qc which is running on standard HSM.
4713 *
4714 * LOCKING:
cca3974e 4715 * If @in_wq is zero, spin_lock_irqsave(host lock).
c17ea20d
TH
4716 * Otherwise, none on entry and grabs host lock.
4717 */
4718static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
4719{
4720 struct ata_port *ap = qc->ap;
4721 unsigned long flags;
4722
4723 if (ap->ops->error_handler) {
4724 if (in_wq) {
ba6a1308 4725 spin_lock_irqsave(ap->lock, flags);
c17ea20d 4726
cca3974e
JG
4727 /* EH might have kicked in while host lock is
4728 * released.
c17ea20d
TH
4729 */
4730 qc = ata_qc_from_tag(ap, qc->tag);
4731 if (qc) {
4732 if (likely(!(qc->err_mask & AC_ERR_HSM))) {
83625006 4733 ap->ops->irq_on(ap);
c17ea20d
TH
4734 ata_qc_complete(qc);
4735 } else
4736 ata_port_freeze(ap);
4737 }
4738
ba6a1308 4739 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4740 } else {
4741 if (likely(!(qc->err_mask & AC_ERR_HSM)))
4742 ata_qc_complete(qc);
4743 else
4744 ata_port_freeze(ap);
4745 }
4746 } else {
4747 if (in_wq) {
ba6a1308 4748 spin_lock_irqsave(ap->lock, flags);
83625006 4749 ap->ops->irq_on(ap);
c17ea20d 4750 ata_qc_complete(qc);
ba6a1308 4751 spin_unlock_irqrestore(ap->lock, flags);
c17ea20d
TH
4752 } else
4753 ata_qc_complete(qc);
4754 }
1da177e4 4755
c81e29b4 4756 ata_altstatus(ap); /* flush */
c17ea20d
TH
4757}
4758
bb5cb290
AL
4759/**
4760 * ata_hsm_move - move the HSM to the next state.
4761 * @ap: the target ata_port
4762 * @qc: qc in progress
4763 * @status: current device status
4764 * @in_wq: 1 if called from workqueue, 0 otherwise
4765 *
4766 * RETURNS:
4767 * 1 when poll next status needed, 0 otherwise.
4768 */
9a1004d0
TH
4769int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
4770 u8 status, int in_wq)
e2cec771 4771{
bb5cb290
AL
4772 unsigned long flags = 0;
4773 int poll_next;
4774
6912ccd5
AL
4775 WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
4776
bb5cb290
AL
4777 /* Make sure ata_qc_issue_prot() does not throw things
4778 * like DMA polling into the workqueue. Notice that
4779 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
4780 */
c234fb00 4781 WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));
bb5cb290 4782
e2cec771 4783fsm_start:
999bb6f4 4784 DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
44877b4e 4785 ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);
999bb6f4 4786
e2cec771
AL
4787 switch (ap->hsm_task_state) {
4788 case HSM_ST_FIRST:
bb5cb290
AL
4789 /* Send first data block or PACKET CDB */
4790
4791 /* If polling, we will stay in the work queue after
4792 * sending the data. Otherwise, interrupt handler
4793 * takes over after sending the data.
4794 */
4795 poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);
4796
e2cec771 4797 /* check device status */
3655d1d3
AL
4798 if (unlikely((status & ATA_DRQ) == 0)) {
4799 /* handle BSY=0, DRQ=0 as error */
4800 if (likely(status & (ATA_ERR | ATA_DF)))
4801 /* device stops HSM for abort/error */
4802 qc->err_mask |= AC_ERR_DEV;
4803 else
4804 /* HSM violation. Let EH handle this */
4805 qc->err_mask |= AC_ERR_HSM;
4806
14be71f4 4807 ap->hsm_task_state = HSM_ST_ERR;
e2cec771 4808 goto fsm_start;
1da177e4
LT
4809 }
4810
71601958
AL
4811 /* Device should not ask for data transfer (DRQ=1)
4812 * when it finds something wrong.
eee6c32f
AL
4813 * We ignore DRQ here and stop the HSM by
4814 * changing hsm_task_state to HSM_ST_ERR and
4815 * let the EH abort the command or reset the device.
71601958
AL
4816 */
4817 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4818 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
4819 "error, dev_stat 0x%X\n", status);
3655d1d3 4820 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4821 ap->hsm_task_state = HSM_ST_ERR;
4822 goto fsm_start;
71601958 4823 }
1da177e4 4824
bb5cb290
AL
4825 /* Send the CDB (atapi) or the first data block (ata pio out).
4826 * During the state transition, interrupt handler shouldn't
4827 * be invoked before the data transfer is complete and
4828 * hsm_task_state is changed. Hence, the following locking.
4829 */
4830 if (in_wq)
ba6a1308 4831 spin_lock_irqsave(ap->lock, flags);
1da177e4 4832
bb5cb290
AL
4833 if (qc->tf.protocol == ATA_PROT_PIO) {
4834 /* PIO data out protocol.
4835 * send first data block.
4836 */
0565c26d 4837
bb5cb290
AL
4838 /* ata_pio_sectors() might change the state
4839 * to HSM_ST_LAST. so, the state is changed here
4840 * before ata_pio_sectors().
4841 */
4842 ap->hsm_task_state = HSM_ST;
4843 ata_pio_sectors(qc);
4844 ata_altstatus(ap); /* flush */
4845 } else
4846 /* send CDB */
4847 atapi_send_cdb(ap, qc);
4848
4849 if (in_wq)
ba6a1308 4850 spin_unlock_irqrestore(ap->lock, flags);
bb5cb290
AL
4851
4852 /* if polling, ata_pio_task() handles the rest.
4853 * otherwise, interrupt handler takes over from here.
4854 */
e2cec771 4855 break;
1c848984 4856
e2cec771
AL
4857 case HSM_ST:
4858 /* complete command or read/write the data register */
4859 if (qc->tf.protocol == ATA_PROT_ATAPI) {
4860 /* ATAPI PIO protocol */
4861 if ((status & ATA_DRQ) == 0) {
3655d1d3
AL
4862 /* No more data to transfer or device error.
4863 * Device error will be tagged in HSM_ST_LAST.
4864 */
e2cec771
AL
4865 ap->hsm_task_state = HSM_ST_LAST;
4866 goto fsm_start;
4867 }
1da177e4 4868
71601958
AL
4869 /* Device should not ask for data transfer (DRQ=1)
4870 * when it finds something wrong.
eee6c32f
AL
4871 * We ignore DRQ here and stop the HSM by
4872 * changing hsm_task_state to HSM_ST_ERR and
4873 * let the EH abort the command or reset the device.
71601958
AL
4874 */
4875 if (unlikely(status & (ATA_ERR | ATA_DF))) {
44877b4e
TH
4876 ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
4877 "device error, dev_stat 0x%X\n",
4878 status);
3655d1d3 4879 qc->err_mask |= AC_ERR_HSM;
eee6c32f
AL
4880 ap->hsm_task_state = HSM_ST_ERR;
4881 goto fsm_start;
71601958 4882 }
1da177e4 4883
e2cec771 4884 atapi_pio_bytes(qc);
7fb6ec28 4885
e2cec771
AL
4886 if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
4887 /* bad ireason reported by device */
4888 goto fsm_start;
1da177e4 4889
e2cec771
AL
4890 } else {
4891 /* ATA PIO protocol */
4892 if (unlikely((status & ATA_DRQ) == 0)) {
4893 /* handle BSY=0, DRQ=0 as error */
3655d1d3
AL
4894 if (likely(status & (ATA_ERR | ATA_DF)))
4895 /* device stops HSM for abort/error */
4896 qc->err_mask |= AC_ERR_DEV;
4897 else
55a8e2c8
TH
4898 /* HSM violation. Let EH handle this.
4899 * Phantom devices also trigger this
4900 * condition. Mark hint.
4901 */
4902 qc->err_mask |= AC_ERR_HSM |
4903 AC_ERR_NODEV_HINT;
3655d1d3 4904
e2cec771
AL
4905 ap->hsm_task_state = HSM_ST_ERR;
4906 goto fsm_start;
4907 }
1da177e4 4908
eee6c32f
AL
4909 /* For PIO reads, some devices may ask for
4910			 * data transfer (DRQ=1) along with ERR=1.
4911 * We respect DRQ here and transfer one
4912 * block of junk data before changing the
4913 * hsm_task_state to HSM_ST_ERR.
4914 *
4915 * For PIO writes, ERR=1 DRQ=1 doesn't make
4916 * sense since the data block has been
4917 * transferred to the device.
71601958
AL
4918 */
4919 if (unlikely(status & (ATA_ERR | ATA_DF))) {
71601958
AL
4920				/* data might be corrupted */
4921 qc->err_mask |= AC_ERR_DEV;
eee6c32f
AL
4922
4923 if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
4924 ata_pio_sectors(qc);
4925 ata_altstatus(ap);
4926 status = ata_wait_idle(ap);
4927 }
4928
3655d1d3
AL
4929 if (status & (ATA_BUSY | ATA_DRQ))
4930 qc->err_mask |= AC_ERR_HSM;
4931
eee6c32f
AL
4932 /* ata_pio_sectors() might change the
4933 * state to HSM_ST_LAST. so, the state
4934 * is changed after ata_pio_sectors().
4935 */
4936 ap->hsm_task_state = HSM_ST_ERR;
4937 goto fsm_start;
71601958
AL
4938 }
4939
e2cec771
AL
4940 ata_pio_sectors(qc);
4941
4942 if (ap->hsm_task_state == HSM_ST_LAST &&
4943 (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
4944 /* all data read */
4945 ata_altstatus(ap);
52a32205 4946 status = ata_wait_idle(ap);
e2cec771
AL
4947 goto fsm_start;
4948 }
4949 }
4950
4951 ata_altstatus(ap); /* flush */
bb5cb290 4952 poll_next = 1;
1da177e4
LT
4953 break;
4954
14be71f4 4955 case HSM_ST_LAST:
6912ccd5
AL
4956 if (unlikely(!ata_ok(status))) {
4957 qc->err_mask |= __ac_err_mask(status);
e2cec771
AL
4958 ap->hsm_task_state = HSM_ST_ERR;
4959 goto fsm_start;
4960 }
4961
4962 /* no more data to transfer */
4332a771 4963 DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
44877b4e 4964 ap->print_id, qc->dev->devno, status);
e2cec771 4965
6912ccd5
AL
4966 WARN_ON(qc->err_mask);
4967
e2cec771 4968 ap->hsm_task_state = HSM_ST_IDLE;
1da177e4 4969
e2cec771 4970 /* complete taskfile transaction */
c17ea20d 4971 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4972
4973 poll_next = 0;
1da177e4
LT
4974 break;
4975
14be71f4 4976 case HSM_ST_ERR:
e2cec771
AL
4977 /* make sure qc->err_mask is available to
4978 * know what's wrong and recover
4979 */
4980 WARN_ON(qc->err_mask == 0);
4981
4982 ap->hsm_task_state = HSM_ST_IDLE;
bb5cb290 4983
999bb6f4 4984 /* complete taskfile transaction */
c17ea20d 4985 ata_hsm_qc_complete(qc, in_wq);
bb5cb290
AL
4986
4987 poll_next = 0;
e2cec771
AL
4988 break;
4989 default:
bb5cb290 4990 poll_next = 0;
6912ccd5 4991 BUG();
1da177e4
LT
4992 }
4993
bb5cb290 4994 return poll_next;
1da177e4
LT
4995}
4996
65f27f38 4997static void ata_pio_task(struct work_struct *work)
8061f5f0 4998{
65f27f38
DH
4999 struct ata_port *ap =
5000 container_of(work, struct ata_port, port_task.work);
5001 struct ata_queued_cmd *qc = ap->port_task_data;
8061f5f0 5002 u8 status;
a1af3734 5003 int poll_next;
8061f5f0 5004
7fb6ec28 5005fsm_start:
a1af3734 5006 WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);
8061f5f0 5007
a1af3734
AL
5008 /*
5009 * This is purely heuristic. This is a fast path.
5010 * Sometimes when we enter, BSY will be cleared in
5011 * a chk-status or two. If not, the drive is probably seeking
5012 * or something. Snooze for a couple msecs, then
5013 * chk-status again. If still busy, queue delayed work.
5014 */
5015 status = ata_busy_wait(ap, ATA_BUSY, 5);
5016 if (status & ATA_BUSY) {
5017 msleep(2);
5018 status = ata_busy_wait(ap, ATA_BUSY, 10);
5019 if (status & ATA_BUSY) {
31ce6dae 5020 ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
a1af3734
AL
5021 return;
5022 }
8061f5f0
TH
5023 }
5024
a1af3734
AL
5025 /* move the HSM */
5026 poll_next = ata_hsm_move(ap, qc, status, 1);
8061f5f0 5027
a1af3734
AL
5028 /* another command or interrupt handler
5029 * may be running at this point.
5030 */
5031 if (poll_next)
7fb6ec28 5032 goto fsm_start;
8061f5f0
TH
5033}
5034
1da177e4
LT
5035/**
5036 * ata_qc_new - Request an available ATA command, for queueing
5037 * @ap: Port associated with device @dev
5038 * @dev: Device from whom we request an available command structure
5039 *
5040 * LOCKING:
0cba632b 5041 * None.
1da177e4
LT
5042 */
5043
5044static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5045{
5046 struct ata_queued_cmd *qc = NULL;
5047 unsigned int i;
5048
e3180499 5049 /* no command while frozen */
b51e9e5d 5050 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5051 return NULL;
5052
2ab7db1f
TH
5053 /* the last tag is reserved for internal command. */
5054 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5055 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5056 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5057 break;
5058 }
5059
5060 if (qc)
5061 qc->tag = i;
5062
5063 return qc;
5064}
5065
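Tag allocation is a lock-free scan: test_and_set_bit() atomically claims the first clear bit, and the last tag is skipped because it is reserved for the internal command. A standalone rehearsal (the stand-in below is deliberately non-atomic; the real code depends on the atomic kernel primitive):

#include <stdio.h>

#define MAX_QUEUE 32

/* Non-atomic stand-in for the kernel's test_and_set_bit(), used only
 * to rehearse the allocation loop logic.
 */
static int test_and_set(unsigned int nr, unsigned long *map)
{
	unsigned long bit = 1UL << nr;
	int was_set = !!(*map & bit);

	*map |= bit;
	return was_set;
}

int main(void)
{
	unsigned long qc_allocated = 0x7;	/* invented: tags 0-2 busy */
	unsigned int i;

	/* last tag reserved for the internal command, as in ata_qc_new() */
	for (i = 0; i < MAX_QUEUE - 1; i++)
		if (!test_and_set(i, &qc_allocated)) {
			printf("claimed tag %u\n", i);	/* prints 3 */
			break;
		}
	return 0;
}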
5066/**
5067 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5068 * @dev: Device from whom we request an available command structure
5069 *
5070 * LOCKING:
0cba632b 5071 * None.
1da177e4
LT
5072 */
5073
3373efd8 5074struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5075{
3373efd8 5076 struct ata_port *ap = dev->ap;
1da177e4
LT
5077 struct ata_queued_cmd *qc;
5078
5079 qc = ata_qc_new(ap);
5080 if (qc) {
1da177e4
LT
5081 qc->scsicmd = NULL;
5082 qc->ap = ap;
5083 qc->dev = dev;
1da177e4 5084
2c13b7ce 5085 ata_qc_reinit(qc);
1da177e4
LT
5086 }
5087
5088 return qc;
5089}
5090
1da177e4
LT
5091/**
5092 * ata_qc_free - free unused ata_queued_cmd
5093 * @qc: Command to complete
5094 *
5095 * Designed to free unused ata_queued_cmd object
5096 * in case something prevents using it.
5097 *
5098 * LOCKING:
cca3974e 5099 * spin_lock_irqsave(host lock)
1da177e4
LT
5100 */
5101void ata_qc_free(struct ata_queued_cmd *qc)
5102{
4ba946e9
TH
5103 struct ata_port *ap = qc->ap;
5104 unsigned int tag;
5105
a4631474 5106 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5107
4ba946e9
TH
5108 qc->flags = 0;
5109 tag = qc->tag;
5110 if (likely(ata_tag_valid(tag))) {
4ba946e9 5111 qc->tag = ATA_TAG_POISON;
6cec4a39 5112 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5113 }
1da177e4
LT
5114}
5115
76014427 5116void __ata_qc_complete(struct ata_queued_cmd *qc)
1da177e4 5117{
dedaf2b0
TH
5118 struct ata_port *ap = qc->ap;
5119
a4631474
TH
5120 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
5121 WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
1da177e4
LT
5122
5123 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
5124 ata_sg_clean(qc);
5125
7401abf2 5126 /* command should be marked inactive atomically with qc completion */
dedaf2b0
TH
5127 if (qc->tf.protocol == ATA_PROT_NCQ)
5128 ap->sactive &= ~(1 << qc->tag);
5129 else
5130 ap->active_tag = ATA_TAG_POISON;
7401abf2 5131
3f3791d3
AL
5132 /* atapi: mark qc as inactive to prevent the interrupt handler
5133 * from completing the command twice later, before the error handler
5134 * is called. (when rc != 0 and atapi request sense is needed)
5135 */
5136 qc->flags &= ~ATA_QCFLAG_ACTIVE;
dedaf2b0 5137 ap->qc_active &= ~(1 << qc->tag);
3f3791d3 5138
1da177e4 5139 /* call completion callback */
77853bf2 5140 qc->complete_fn(qc);
1da177e4
LT
5141}
5142
39599a53
TH
5143static void fill_result_tf(struct ata_queued_cmd *qc)
5144{
5145 struct ata_port *ap = qc->ap;
5146
39599a53 5147 qc->result_tf.flags = qc->tf.flags;
4742d54f 5148 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5149}
5150
f686bcb8
TH
5151/**
5152 * ata_qc_complete - Complete an active ATA command
5153 * @qc: Command to complete
5155 *
5156 * Indicate to the mid and upper layers that an ATA
5157 * command has completed, with either an ok or not-ok status.
5158 *
5159 * LOCKING:
cca3974e 5160 * spin_lock_irqsave(host lock)
f686bcb8
TH
5161 */
5162void ata_qc_complete(struct ata_queued_cmd *qc)
5163{
5164 struct ata_port *ap = qc->ap;
5165
5166 /* XXX: New EH and old EH use different mechanisms to
5167 * synchronize EH with regular execution path.
5168 *
5169 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
5170 * Normal execution path is responsible for not accessing a
5171 * failed qc. libata core enforces the rule by returning NULL
5172 * from ata_qc_from_tag() for failed qcs.
5173 *
5174 * Old EH depends on ata_qc_complete() nullifying completion
5175 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
5176 * not synchronize with interrupt handler. Only PIO task is
5177 * taken care of.
5178 */
5179 if (ap->ops->error_handler) {
b51e9e5d 5180 WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);
f686bcb8
TH
5181
5182 if (unlikely(qc->err_mask))
5183 qc->flags |= ATA_QCFLAG_FAILED;
5184
5185 if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
5186 if (!ata_tag_internal(qc->tag)) {
5187 /* always fill result TF for failed qc */
39599a53 5188 fill_result_tf(qc);
f686bcb8
TH
5189 ata_qc_schedule_eh(qc);
5190 return;
5191 }
5192 }
5193
5194 /* read result TF if requested */
5195 if (qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5196 fill_result_tf(qc);
f686bcb8
TH
5197
5198 __ata_qc_complete(qc);
5199 } else {
5200 if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
5201 return;
5202
5203 /* read result TF if failed or requested */
5204 if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
39599a53 5205 fill_result_tf(qc);
f686bcb8
TH
5206
5207 __ata_qc_complete(qc);
5208 }
5209}
5210
dedaf2b0
TH
5211/**
5212 * ata_qc_complete_multiple - Complete multiple qcs successfully
5213 * @ap: port in question
5214 * @qc_active: new qc_active mask
5215 * @finish_qc: LLDD callback invoked before completing a qc
5216 *
5217 *	Complete in-flight commands. This function is meant to be
5218 *	called from the low-level driver's interrupt routine to complete
5219 *	requests normally. ap->qc_active and @qc_active are compared
5220 * and commands are completed accordingly.
5221 *
5222 * LOCKING:
cca3974e 5223 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5224 *
5225 * RETURNS:
5226 * Number of completed commands on success, -errno otherwise.
5227 */
5228int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5229 void (*finish_qc)(struct ata_queued_cmd *))
5230{
5231 int nr_done = 0;
5232 u32 done_mask;
5233 int i;
5234
5235 done_mask = ap->qc_active ^ qc_active;
5236
5237 if (unlikely(done_mask & qc_active)) {
5238 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5239 "(%08x->%08x)\n", ap->qc_active, qc_active);
5240 return -EINVAL;
5241 }
5242
5243 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5244 struct ata_queued_cmd *qc;
5245
5246 if (!(done_mask & (1 << i)))
5247 continue;
5248
5249 if ((qc = ata_qc_from_tag(ap, i))) {
5250 if (finish_qc)
5251 finish_qc(qc);
5252 ata_qc_complete(qc);
5253 nr_done++;
5254 }
5255 }
5256
5257 return nr_done;
5258}
5259
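The XOR yields exactly the tags that changed state, and completion is legal only when bits go from 1 to 0; a bit newly set in @qc_active makes done_mask & qc_active nonzero and trips the -EINVAL path above. Worked standalone example (tag masks invented):

#include <stdio.h>

int main(void)
{
	unsigned int old_active = 0xb;	/* invented: tags 0, 1, 3 in flight */
	unsigned int new_active = 0x1;	/* invented: only tag 0 still active */
	unsigned int done_mask = old_active ^ new_active;	/* as above */
	unsigned int i;

	if (done_mask & new_active)
		return 1;	/* illegal transition: a tag appeared */

	for (i = 0; i < 32; i++)
		if (done_mask & (1u << i))
			printf("complete tag %u\n", i);	/* tags 1 and 3 */
	return 0;
}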
1da177e4
LT
5260static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5261{
5262 struct ata_port *ap = qc->ap;
5263
5264 switch (qc->tf.protocol) {
3dc1d881 5265 case ATA_PROT_NCQ:
1da177e4
LT
5266 case ATA_PROT_DMA:
5267 case ATA_PROT_ATAPI_DMA:
5268 return 1;
5269
5270 case ATA_PROT_ATAPI:
5271 case ATA_PROT_PIO:
1da177e4
LT
5272 if (ap->flags & ATA_FLAG_PIO_DMA)
5273 return 1;
5274
5275 /* fall through */
5276
5277 default:
5278 return 0;
5279 }
5280
5281 /* never reached */
5282}
5283
5284/**
5285 * ata_qc_issue - issue taskfile to device
5286 * @qc: command to issue to device
5287 *
5288 *	Prepare an ATA command for submission to the device.
5289 * This includes mapping the data into a DMA-able
5290 * area, filling in the S/G table, and finally
5291 * writing the taskfile to hardware, starting the command.
5292 *
5293 * LOCKING:
cca3974e 5294 * spin_lock_irqsave(host lock)
1da177e4 5295 */
8e0e694a 5296void ata_qc_issue(struct ata_queued_cmd *qc)
1da177e4
LT
5297{
5298 struct ata_port *ap = qc->ap;
5299
dedaf2b0
TH
5300 /* Make sure only one non-NCQ command is outstanding. The
5301 * check is skipped for old EH because it reuses active qc to
5302 * request ATAPI sense.
5303 */
5304 WARN_ON(ap->ops->error_handler && ata_tag_valid(ap->active_tag));
5305
5306 if (qc->tf.protocol == ATA_PROT_NCQ) {
5307 WARN_ON(ap->sactive & (1 << qc->tag));
5308 ap->sactive |= 1 << qc->tag;
5309 } else {
5310 WARN_ON(ap->sactive);
5311 ap->active_tag = qc->tag;
5312 }
5313
e4a70e76 5314 qc->flags |= ATA_QCFLAG_ACTIVE;
dedaf2b0 5315 ap->qc_active |= 1 << qc->tag;
e4a70e76 5316
1da177e4
LT
5317 if (ata_should_dma_map(qc)) {
5318 if (qc->flags & ATA_QCFLAG_SG) {
5319 if (ata_sg_setup(qc))
8e436af9 5320 goto sg_err;
1da177e4
LT
5321 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
5322 if (ata_sg_setup_one(qc))
8e436af9 5323 goto sg_err;
1da177e4
LT
5324 }
5325 } else {
5326 qc->flags &= ~ATA_QCFLAG_DMAMAP;
5327 }
5328
5329 ap->ops->qc_prep(qc);
5330
8e0e694a
TH
5331 qc->err_mask |= ap->ops->qc_issue(qc);
5332 if (unlikely(qc->err_mask))
5333 goto err;
5334 return;
1da177e4 5335
8e436af9
TH
5336sg_err:
5337 qc->flags &= ~ATA_QCFLAG_DMAMAP;
8e0e694a
TH
5338 qc->err_mask |= AC_ERR_SYSTEM;
5339err:
5340 ata_qc_complete(qc);
1da177e4
LT
5341}
5342
5343/**
5344 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
5345 * @qc: command to issue to device
5346 *
5347 * Using various libata functions and hooks, this function
5348 * starts an ATA command. ATA commands are grouped into
5349 * classes called "protocols", and issuing each type of protocol
5350 * is slightly different.
5351 *
0baab86b
EF
5352 * May be used as the qc_issue() entry in ata_port_operations.
5353 *
1da177e4 5354 * LOCKING:
cca3974e 5355 * spin_lock_irqsave(host lock)
1da177e4
LT
5356 *
5357 * RETURNS:
9a3d9eb0 5358 * Zero on success, AC_ERR_* mask on failure
1da177e4
LT
5359 */
5360
9a3d9eb0 5361unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
1da177e4
LT
5362{
5363 struct ata_port *ap = qc->ap;
5364
e50362ec
AL
5365	/* Use polling PIO if the LLD doesn't handle
5366	 * interrupt-driven PIO and the ATAPI CDB interrupt.
5367 */
5368 if (ap->flags & ATA_FLAG_PIO_POLLING) {
5369 switch (qc->tf.protocol) {
5370 case ATA_PROT_PIO:
e3472cbe 5371 case ATA_PROT_NODATA:
e50362ec
AL
5372 case ATA_PROT_ATAPI:
5373 case ATA_PROT_ATAPI_NODATA:
5374 qc->tf.flags |= ATA_TFLAG_POLLING;
5375 break;
5376 case ATA_PROT_ATAPI_DMA:
5377 if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
3a778275 5378 /* see ata_dma_blacklisted() */
e50362ec
AL
5379 BUG();
5380 break;
5381 default:
5382 break;
5383 }
5384 }
5385
3d3cca37
TH
5386 /* Some controllers show flaky interrupt behavior after
5387 * setting xfer mode. Use polling instead.
5388 */
5389 if (unlikely(qc->tf.command == ATA_CMD_SET_FEATURES &&
5390 qc->tf.feature == SETFEATURES_XFER) &&
5391 (ap->flags & ATA_FLAG_SETXFER_POLLING))
5392 qc->tf.flags |= ATA_TFLAG_POLLING;
5393
312f7da2 5394 /* select the device */
1da177e4
LT
5395 ata_dev_select(ap, qc->dev->devno, 1, 0);
5396
312f7da2 5397 /* start the command */
1da177e4
LT
5398 switch (qc->tf.protocol) {
5399 case ATA_PROT_NODATA:
312f7da2
AL
5400 if (qc->tf.flags & ATA_TFLAG_POLLING)
5401 ata_qc_set_polling(qc);
5402
e5338254 5403 ata_tf_to_host(ap, &qc->tf);
312f7da2
AL
5404 ap->hsm_task_state = HSM_ST_LAST;
5405
5406 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5407 ata_port_queue_task(ap, ata_pio_task, qc, 0);
312f7da2 5408
1da177e4
LT
5409 break;
5410
5411 case ATA_PROT_DMA:
587005de 5412 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5413
1da177e4
LT
5414 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5415 ap->ops->bmdma_setup(qc); /* set up bmdma */
5416 ap->ops->bmdma_start(qc); /* initiate bmdma */
312f7da2 5417 ap->hsm_task_state = HSM_ST_LAST;
1da177e4
LT
5418 break;
5419
312f7da2
AL
5420 case ATA_PROT_PIO:
5421 if (qc->tf.flags & ATA_TFLAG_POLLING)
5422 ata_qc_set_polling(qc);
1da177e4 5423
e5338254 5424 ata_tf_to_host(ap, &qc->tf);
312f7da2 5425
54f00389
AL
5426 if (qc->tf.flags & ATA_TFLAG_WRITE) {
5427 /* PIO data out protocol */
5428 ap->hsm_task_state = HSM_ST_FIRST;
31ce6dae 5429 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5430
5431 /* always send first data block using
e27486db 5432 * the ata_pio_task() codepath.
54f00389 5433 */
312f7da2 5434 } else {
54f00389
AL
5435 /* PIO data in protocol */
5436 ap->hsm_task_state = HSM_ST;
5437
5438 if (qc->tf.flags & ATA_TFLAG_POLLING)
31ce6dae 5439 ata_port_queue_task(ap, ata_pio_task, qc, 0);
54f00389
AL
5440
5441 /* if polling, ata_pio_task() handles the rest.
5442 * otherwise, interrupt handler takes over from here.
5443 */
312f7da2
AL
5444 }
5445
1da177e4
LT
5446 break;
5447
1da177e4 5448 case ATA_PROT_ATAPI:
1da177e4 5449 case ATA_PROT_ATAPI_NODATA:
312f7da2
AL
5450 if (qc->tf.flags & ATA_TFLAG_POLLING)
5451 ata_qc_set_polling(qc);
5452
e5338254 5453 ata_tf_to_host(ap, &qc->tf);
f6ef65e6 5454
312f7da2
AL
5455 ap->hsm_task_state = HSM_ST_FIRST;
5456
5457 /* send cdb by polling if no cdb interrupt */
5458 if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
5459 (qc->tf.flags & ATA_TFLAG_POLLING))
31ce6dae 5460 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5461 break;
5462
5463 case ATA_PROT_ATAPI_DMA:
587005de 5464 WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);
312f7da2 5465
1da177e4
LT
5466 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
5467 ap->ops->bmdma_setup(qc); /* set up bmdma */
312f7da2
AL
5468 ap->hsm_task_state = HSM_ST_FIRST;
5469
5470 /* send cdb by polling if no cdb interrupt */
5471 if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
31ce6dae 5472 ata_port_queue_task(ap, ata_pio_task, qc, 0);
1da177e4
LT
5473 break;
5474
5475 default:
5476 WARN_ON(1);
9a3d9eb0 5477 return AC_ERR_SYSTEM;
1da177e4
LT
5478 }
5479
5480 return 0;
5481}
5482
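
/* Since ata_qc_issue_prot() is designed to be plugged in as the
 * qc_issue() hook, a typical interrupt-driven SFF LLD of this era simply
 * points its ata_port_operations at the stock helpers.  A minimal sketch
 * follows; the structure name "example_port_ops" is hypothetical, but
 * every hook value named here is exported by libata:
 *
 *	static const struct ata_port_operations example_port_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		.dev_select	= ata_std_dev_select,
 *		.qc_prep	= ata_qc_prep,
 *		.qc_issue	= ata_qc_issue_prot,
 *		.data_xfer	= ata_data_xfer,
 *		.irq_handler	= ata_interrupt,
 *		.irq_clear	= ata_bmdma_irq_clear,
 *		.port_start	= ata_port_start,
 *	};
 */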

/**
 * ata_host_intr - Handle host interrupt for given (port, task)
 * @ap: Port on which interrupt arrived (possibly...)
 * @qc: Taskfile currently active in engine
 *
 * Handle host interrupt for given queued command.  Currently,
 * only DMA interrupts are handled.  All other commands are
 * handled via polling with interrupts disabled (nIEN bit).
 *
 * LOCKING:
 * spin_lock_irqsave(host lock)
 *
 * RETURNS:
 * One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Checking the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for ATAPI devices, so
		 * there is no need to check is_atapi_taskfile(&qc->tf)
		 * again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ap->ops->irq_ack(ap, 0); /* debug trap */
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}

/**
 * ata_interrupt - Default ATA host interrupt handler
 * @irq: irq line (unused)
 * @dev_instance: pointer to our ata_host information structure
 *
 * Default interrupt handler for PCI IDE devices.  Calls
 * ata_host_intr() for each port that is not disabled.
 *
 * LOCKING:
 * Obtains host lock during operation.
 *
 * RETURNS:
 * IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			qc = ata_qc_from_tag(ap, ap->active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}

/**
 * sata_scr_valid - test whether SCRs are accessible
 * @ap: ATA port to test SCR accessibility for
 *
 * Test whether SCRs are accessible for @ap.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if SCRs are accessible, 0 otherwise.
 */
int sata_scr_valid(struct ata_port *ap)
{
	return ap->cbl == ATA_CBL_SATA && ap->ops->scr_read;
}

/**
 * sata_scr_read - read SCR register of the specified port
 * @ap: ATA port to read SCR for
 * @reg: SCR to read
 * @val: Place to store read value
 *
 * Read SCR register @reg of @ap into *@val.  This function is
 * guaranteed to succeed if the cable type of the port is SATA
 * and the port implements ->scr_read.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
int sata_scr_read(struct ata_port *ap, int reg, u32 *val)
{
	if (sata_scr_valid(ap)) {
		*val = ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}

/**
 * sata_scr_write - write SCR register of the specified port
 * @ap: ATA port to write SCR for
 * @reg: SCR to write
 * @val: value to write
 *
 * Write @val to SCR register @reg of @ap.  This function is
 * guaranteed to succeed if the cable type of the port is SATA
 * and the port implements ->scr_write.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		return 0;
	}
	return -EOPNOTSUPP;
}

/**
 * sata_scr_write_flush - write SCR register of the specified port and flush
 * @ap: ATA port to write SCR for
 * @reg: SCR to write
 * @val: value to write
 *
 * This function is identical to sata_scr_write() except that this
 * function performs a flush after writing to the register.
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_port *ap, int reg, u32 val)
{
	if (sata_scr_valid(ap)) {
		ap->ops->scr_write(ap, reg, val);
		ap->ops->scr_read(ap, reg);
		return 0;
	}
	return -EOPNOTSUPP;
}
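
/* Example: the sata_scr_*() helpers above let EH and LLD code poke at
 * SStatus/SError/SControl without caring whether the port actually
 * implements SCR access.  A minimal sketch (the helper name is
 * hypothetical; clearing SError by writing back the read value is the
 * usual libata pattern):
 *
 *	static void example_clear_serror(struct ata_port *ap)
 *	{
 *		u32 serror;
 *
 *		if (sata_scr_read(ap, SCR_ERROR, &serror) == 0)
 *			sata_scr_write(ap, SCR_ERROR, serror);
 *		// -EOPNOTSUPP just means the port has no SCRs (e.g. PATA)
 *	}
 */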

/**
 * ata_port_online - test whether the given port is online
 * @ap: ATA port to test
 *
 * Test whether @ap is online.  Note that this function returns 0
 * if the online status of @ap cannot be obtained, so
 * ata_port_online(ap) != !ata_port_offline(ap).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if the port online status is available and online.
 */
int ata_port_online(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) == 0x3)
		return 1;
	return 0;
}

/**
 * ata_port_offline - test whether the given port is offline
 * @ap: ATA port to test
 *
 * Test whether @ap is offline.  Note that this function returns
 * 0 if the offline status of @ap cannot be obtained, so
 * ata_port_online(ap) != !ata_port_offline(ap).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * 1 if the port offline status is available and offline.
 */
int ata_port_offline(struct ata_port *ap)
{
	u32 sstatus;

	if (!sata_scr_read(ap, SCR_STATUS, &sstatus) && (sstatus & 0xf) != 0x3)
		return 1;
	return 0;
}

int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}

#ifdef CONFIG_PM
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		ap->eh_info.action |= action;
		ap->eh_info.flags |= ehi_flags;

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}

/**
 * ata_host_suspend - suspend host
 * @host: host to suspend
 * @mesg: PM message
 *
 * Suspend @host.  Actual operation is performed by EH.  This
 * function requests EH to perform PM operations and waits for EH
 * to finish.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}

/**
 * ata_host_resume - resume host
 * @host: host to resume
 *
 * Resume @host.  Actual operation is performed by EH.  This
 * function requests EH to perform PM operations and returns.
 * Note that all resume operations are performed in parallel.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;
}
#endif

/**
 * ata_port_start - Set port up for dma.
 * @ap: Port to initialize
 *
 * Called just after data structures for each port are
 * initialized.  Allocates space for PRD table.
 *
 * May be used as the port_start() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
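
/* LLDs that need extra per-port state usually wrap ata_port_start()
 * rather than duplicate the PRD allocation.  A minimal, hypothetical
 * sketch (the "example_priv" structure is illustrative only;
 * devm_kzalloc() keeps the lifetime rules identical to the
 * devres-managed PRD above):
 *
 *	static int example_port_start(struct ata_port *ap)
 *	{
 *		struct example_priv *pp;
 *		int rc;
 *
 *		rc = ata_port_start(ap);	// PRD + pad buffer
 *		if (rc)
 *			return rc;
 *
 *		pp = devm_kzalloc(ap->dev, sizeof(*pp), GFP_KERNEL);
 *		if (!pp)
 *			return -ENOMEM;
 *		ap->private_data = pp;
 *		return 0;
 *	}
 */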

/**
 * ata_dev_init - Initialize an ata_device structure
 * @dev: Device structure to initialize
 *
 * Initialize @dev in preparation for probing.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_port *ap = dev->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	ap->sata_spd_limit = ap->hw_sata_spd_limit;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	spin_unlock_irqrestore(ap->lock, flags);

	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}

/**
 * ata_port_alloc - allocate and initialize basic ATA port resources
 * @host: ATA host this allocated port belongs to
 *
 * Allocate and initialize basic ATA port resources.
 *
 * RETURNS:
 * Allocated ATA port on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;
	unsigned int i;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;

	ap->hw_sata_spd_limit = UINT_MAX;
	ap->active_tag = ATA_TAG_POISON;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);

	ap->cbl = ATA_CBL_NONE;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		dev->ap = ap;
		dev->devno = i;
		ata_dev_init(dev);
	}

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}

static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
		host->ops->host_stop(host);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}

/**
 * ata_host_alloc - allocate and init basic ATA host resources
 * @dev: generic device this host is associated with
 * @max_ports: maximum number of ATA ports associated with this host
 *
 * Allocate and initialize basic ATA host resources.  LLD calls
 * this function to allocate a host, initializes it fully and
 * attaches it using ata_host_register().
 *
 * @max_ports ports are allocated and host->n_ports is
 * initialized to @max_ports.  The caller is allowed to decrease
 * host->n_ports before calling ata_host_register().  The unused
 * ports will be automatically freed on registration.
 *
 * RETURNS:
 * Allocated ATA host on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses) */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}

/**
 * ata_host_alloc_pinfo - alloc host and init with port_info array
 * @dev: generic device this host is associated with
 * @ppi: array of ATA port_info to initialize host with
 * @n_ports: number of ATA ports attached to this host
 *
 * Allocate ATA host and initialize with info from @ppi.  If NULL
 * terminated, @ppi may contain fewer entries than @n_ports.  The
 * last entry will be used for the remaining ports.
 *
 * RETURNS:
 * Allocated ATA host on success, NULL on failure.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->ops = pi->port_ops;

		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}

/**
 * ata_host_start - start and freeze ports of an ATA host
 * @host: ATA host to start ports for
 *
 * Start and then freeze ports of @host.  Started status is
 * recorded in host->flags, so this function can be called
 * multiple times.  Ports are guaranteed to get started only
 * once.  If host->ops isn't initialized yet, it's set to the
 * first non-dummy port ops.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int i, rc;

	if (host->flags & ATA_HOST_STARTED)
		return 0;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				ata_port_printk(ap, KERN_ERR, "failed to "
						"start port (errno=%d)\n", rc);
				goto err_out;
			}
		}

		ata_eh_freeze_port(ap);
	}

	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	return rc;
}

/**
 * ata_host_init - Initialize a host struct
 * @host: host to initialize
 * @dev: device host is attached to
 * @flags: host flags
 * @ops: port_ops
 *
 * LOCKING:
 * PCI/etc. bus probe sem.
 *
 */
/* KILLME - the only user left is ipr */
void ata_host_init(struct ata_host *host, struct device *dev,
		   unsigned long flags, const struct ata_port_operations *ops)
{
	spin_lock_init(&host->lock);
	host->dev = dev;
	host->flags = flags;
	host->ops = ops;
}

/**
 * ata_host_register - register initialized ATA host
 * @host: ATA host to register
 * @sht: template for SCSI host
 *
 * Register initialized ATA host.  @host is allocated using
 * ata_host_alloc() and fully initialized by LLD.  This function
 * starts ports, registers @host with ATA and SCSI layers and
 * probes registered devices.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
{
	int i, rc;

	/* host must have been started */
	if (!(host->flags & ATA_HOST_STARTED)) {
		dev_printk(KERN_ERR, host->dev,
			   "BUG: trying to register unstarted host\n");
		WARN_ON(1);
		return -EINVAL;
	}

	/* Blow away unused ports.  This happens when LLD can't
	 * determine the exact number of ports to allocate at
	 * allocation time.
	 */
	for (i = host->n_ports; host->ports[i]; i++)
		kfree(host->ports[i]);

	/* give ports names and add SCSI hosts */
	for (i = 0; i < host->n_ports; i++)
		host->ports[i]->print_id = ata_print_id++;

	rc = ata_scsi_add_hosts(host, sht);
	if (rc)
		return rc;

	/* set cable, sata_spd_limit and report */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int irq_line;
		u32 scontrol;
		unsigned long xfer_mask;

		/* set SATA cable type if still unset */
		if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
			ap->cbl = ATA_CBL_SATA;

		/* init sata_spd_limit to the current value */
		if (sata_scr_read(ap, SCR_CONTROL, &scontrol) == 0) {
			int spd = (scontrol >> 4) & 0xf;
			ap->hw_sata_spd_limit &= (1 << spd) - 1;
		}
		ap->sata_spd_limit = ap->hw_sata_spd_limit;

		/* report the secondary IRQ for second channel legacy */
		irq_line = host->irq;
		if (i == 1 && host->irq2)
			irq_line = host->irq2;

		xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
					      ap->udma_mask);

		/* print per-port info to dmesg */
		if (!ata_port_is_dummy(ap))
			ata_port_printk(ap, KERN_INFO, "%cATA max %s cmd 0x%p "
					"ctl 0x%p bmdma 0x%p irq %d\n",
					ap->cbl == ATA_CBL_SATA ? 'S' : 'P',
					ata_mode_string(xfer_mask),
					ap->ioaddr.cmd_addr,
					ap->ioaddr.ctl_addr,
					ap->ioaddr.bmdma_addr,
					irq_line);
		else
			ata_port_printk(ap, KERN_INFO, "DUMMY\n");
	}

	/* perform each probe synchronously */
	DPRINTK("probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		int rc;

		/* probe */
		if (ap->ops->error_handler) {
			struct ata_eh_info *ehi = &ap->eh_info;
			unsigned long flags;

			ata_port_probe(ap);

			/* kick EH for boot probing */
			spin_lock_irqsave(ap->lock, flags);

			ehi->probe_mask = (1 << ATA_MAX_DEVICES) - 1;
			ehi->action |= ATA_EH_SOFTRESET;
			ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;

			ap->pflags &= ~ATA_PFLAG_INITIALIZING;
			ap->pflags |= ATA_PFLAG_LOADING;
			ata_port_schedule_eh(ap);

			spin_unlock_irqrestore(ap->lock, flags);

			/* wait for EH to finish */
			ata_port_wait_eh(ap);
		} else {
			DPRINTK("ata%u: bus probe begin\n", ap->print_id);
			rc = ata_bus_probe(ap);
			DPRINTK("ata%u: bus probe end\n", ap->print_id);

			if (rc) {
				/* FIXME: do something useful here?
				 * Current libata behavior will
				 * tear down everything when
				 * the module is removed
				 * or the h/w is unplugged.
				 */
			}
		}
	}

	/* probes are done, now scan each port's disk(s) */
	DPRINTK("host probe begin\n");
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		ata_scsi_scan_host(ap);
	}

	return 0;
}

/**
 * ata_host_activate - start host, request IRQ and register it
 * @host: target ATA host
 * @irq: IRQ to request
 * @irq_handler: irq_handler used when requesting IRQ
 * @irq_flags: irq_flags used when requesting IRQ
 * @sht: scsi_host_template to use when registering the host
 *
 * After allocating an ATA host and initializing it, most libata
 * LLDs perform three steps to activate the host - start host,
 * request IRQ and register it.  This helper takes the necessary
 * arguments and performs the three steps in one go.
 *
 * LOCKING:
 * Inherited from calling layer (may sleep).
 *
 * RETURNS:
 * 0 on success, -errno otherwise.
 */
int ata_host_activate(struct ata_host *host, int irq,
		      irq_handler_t irq_handler, unsigned long irq_flags,
		      struct scsi_host_template *sht)
{
	int rc;

	rc = ata_host_start(host);
	if (rc)
		return rc;

	rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
			      dev_driver_string(host->dev), host);
	if (rc)
		return rc;

	rc = ata_host_register(host, sht);
	/* if failed, just free the IRQ and leave ports alone */
	if (rc)
		devm_free_irq(host->dev, irq, host);

	return rc;
}
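
/* Taken together, ata_host_alloc_pinfo() + ata_host_activate() reduce a
 * typical LLD probe routine to a few calls.  A minimal sketch for a
 * hypothetical two-port PCI controller ("example_port_info",
 * "example_sht" and "example_init_one" are illustrative names; the
 * libata calls are the ones defined above):
 *
 *	static int example_init_one(struct pci_dev *pdev,
 *				    const struct pci_device_id *id)
 *	{
 *		const struct ata_port_info *ppi[] =
 *			{ &example_port_info, NULL };
 *		struct ata_host *host;
 *		int rc;
 *
 *		rc = pcim_enable_device(pdev);
 *		if (rc)
 *			return rc;
 *
 *		host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 *		if (!host)
 *			return -ENOMEM;
 *
 *		// ... map BARs and fill each port's ap->ioaddr here ...
 *
 *		pci_set_master(pdev);
 *		return ata_host_activate(host, pdev->irq, ata_interrupt,
 *					 IRQF_SHARED, &example_sht);
 *	}
 */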

/**
 * ata_port_detach - Detach ATA port in preparation of device removal
 * @ap: ATA port to be detached
 *
 * Detach all ATA devices and the associated SCSI devices of @ap;
 * then, remove the associated SCSI host.  @ap is guaranteed to
 * be quiescent on return from this function.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	int i;

	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING, so no new device
	 * will be attached.  Disable all existing devices.
	 */
	spin_lock_irqsave(ap->lock, flags);

	for (i = 0; i < ATA_MAX_DEVICES; i++)
		ata_dev_disable(&ap->device[i]);

	spin_unlock_irqrestore(ap->lock, flags);

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retries will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* Flush hotplug task.  The sequence is similar to
	 * ata_port_flush_task().
	 */
	cancel_work_sync(&ap->hotplug_task.work); /* akpm: why? */
	cancel_delayed_work(&ap->hotplug_task);
	cancel_work_sync(&ap->hotplug_task.work);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}

/**
 * ata_host_detach - Detach all ports of an ATA host
 * @host: Host to detach
 *
 * Detach all ports of @host.
 *
 * LOCKING:
 * Kernel thread context (may sleep).
 */
void ata_host_detach(struct ata_host *host)
{
	int i;

	for (i = 0; i < host->n_ports; i++)
		ata_port_detach(host->ports[i]);
}

/**
 * ata_std_ports - initialize ioaddr with standard port offsets.
 * @ioaddr: IO address structure to be initialized
 *
 * Utility function which initializes data_addr, error_addr,
 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 * device_addr, status_addr, and command_addr to standard offsets
 * relative to cmd_addr.
 *
 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
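
/* Example: an LLD that has ioremapped a command block only needs to set
 * cmd_addr (and usually ctl_addr) before letting ata_std_ports() derive
 * the rest.  A sketch, assuming "mmio" is the LLD's ioremapped base;
 * the 0x00/0x10 offsets are hypothetical, device-specific layout:
 *
 *	struct ata_ioports *ioaddr = &ap->ioaddr;
 *
 *	ioaddr->cmd_addr = mmio + 0x00;
 *	ioaddr->ctl_addr = mmio + 0x10;
 *	ioaddr->altstatus_addr = ioaddr->ctl_addr;
 *	ata_std_ports(ioaddr);
 */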


#ifdef CONFIG_PCI

/**
 * ata_pci_remove_one - PCI layer callback for device removal
 * @pdev: PCI device that was removed
 *
 * PCI layer indicates to libata via this hook that a hot-unplug or
 * module unload event has occurred.  Detach all ports.  Resource
 * release is handled via devres.
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
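
/* With devres handling resource release, wiring ata_pci_remove_one()
 * into a PCI driver is just a table entry.  A sketch, with hypothetical
 * "example_pci_tbl"/"example_init_one" and the PM hooks defined below:
 *
 *	static struct pci_driver example_pci_driver = {
 *		.name		= "example",
 *		.id_table	= example_pci_tbl,
 *		.probe		= example_init_one,
 *		.remove		= ata_pci_remove_one,
 *	#ifdef CONFIG_PM
 *		.suspend	= ata_pci_device_suspend,
 *		.resume		= ata_pci_device_resume,
 *	#endif
 *	};
 */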

/* move to PCI subsystem */
int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
{
	unsigned long tmp = 0;

	switch (bits->width) {
	case 1: {
		u8 tmp8 = 0;
		pci_read_config_byte(pdev, bits->reg, &tmp8);
		tmp = tmp8;
		break;
	}
	case 2: {
		u16 tmp16 = 0;
		pci_read_config_word(pdev, bits->reg, &tmp16);
		tmp = tmp16;
		break;
	}
	case 4: {
		u32 tmp32 = 0;
		pci_read_config_dword(pdev, bits->reg, &tmp32);
		tmp = tmp32;
		break;
	}

	default:
		return -EINVAL;
	}

	tmp &= bits->mask;

	return (tmp == bits->val) ? 1 : 0;
}
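
/* Example: pci_test_config_bits() answers "does this config-space field
 * match?" questions such as an IDE port-enable check.  The struct holds
 * { reg, width (bytes), mask, val }; the register offset and values in
 * this sketch are hypothetical:
 *
 *	static const struct pci_bits example_enable_bits =
 *		{ 0x41, 1, 0x80, 0x80 };	// reg, width, mask, val
 *
 *	if (!pci_test_config_bits(pdev, &example_enable_bits))
 *		return -ENOENT;	// channel disabled by firmware
 */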

#ifdef CONFIG_PM
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	pci_save_state(pdev);
	pci_disable_device(pdev);

	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}

int ata_pci_device_do_resume(struct pci_dev *pdev)
{
	int rc;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);

	rc = pcim_enable_device(pdev);
	if (rc) {
		dev_printk(KERN_ERR, &pdev->dev,
			   "failed to enable device after resume (%d)\n", rc);
		return rc;
	}

	pci_set_master(pdev);
	return 0;
}

int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc = 0;

	rc = ata_host_suspend(host, mesg);
	if (rc)
		return rc;

	ata_pci_device_do_suspend(pdev, mesg);

	return 0;
}

int ata_pci_device_resume(struct pci_dev *pdev)
{
	struct ata_host *host = dev_get_drvdata(&pdev->dev);
	int rc;

	rc = ata_pci_device_do_resume(pdev);
	if (rc == 0)
		ata_host_resume(host);
	return rc;
}
#endif /* CONFIG_PM */

#endif /* CONFIG_PCI */


static int __init ata_init(void)
{
	ata_probe_timeout *= HZ;
	ata_wq = create_workqueue("ata");
	if (!ata_wq)
		return -ENOMEM;

	ata_aux_wq = create_singlethread_workqueue("ata_aux");
	if (!ata_aux_wq) {
		destroy_workqueue(ata_wq);
		return -ENOMEM;
	}

	printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
	return 0;
}

static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}

subsys_initcall(ata_init);
module_exit(ata_exit);

static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);

int ata_ratelimit(void)
{
	int rc;
	unsigned long flags;

	spin_lock_irqsave(&ata_ratelimit_lock, flags);

	if (time_after(jiffies, ratelimit_time)) {
		rc = 1;
		ratelimit_time = jiffies + (HZ/5);
	} else
		rc = 0;

	spin_unlock_irqrestore(&ata_ratelimit_lock, flags);

	return rc;
}
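
/* Example: ata_ratelimit() gates noisy paths (interrupt storms, repeated
 * EH complaints) to at most one message per HZ/5 jiffies, i.e. roughly
 * five per second globally.  Typical use wraps the printk, not the work
 * itself (the message text here is illustrative):
 *
 *	if (ata_ratelimit())
 *		ata_port_printk(ap, KERN_WARNING,
 *				"irq stat 0x%x is stuck\n", irq_stat);
 */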

/**
 * ata_wait_register - wait until register value changes
 * @reg: IO-mapped register
 * @mask: Mask to apply to read register value
 * @val: Wait condition
 * @interval_msec: polling interval in milliseconds
 * @timeout_msec: timeout in milliseconds
 *
 * Waiting for some bits of register to change is a common
 * operation for ATA controllers.  This function reads 32bit LE
 * IO-mapped register @reg and tests for the following condition:
 *
 * (*@reg & mask) != val
 *
 * If the condition is met, it returns; otherwise, the process is
 * repeated after @interval_msec until timeout.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 *
 * RETURNS:
 * The final register value.
 */
u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
		      unsigned long interval_msec,
		      unsigned long timeout_msec)
{
	unsigned long timeout;
	u32 tmp;

	tmp = ioread32(reg);

	/* Calculate timeout _after_ the first read to make sure
	 * preceding writes reach the controller before starting to
	 * eat away the timeout.
	 */
	timeout = jiffies + (timeout_msec * HZ) / 1000;

	while ((tmp & mask) == val && time_before(jiffies, timeout)) {
		msleep(interval_msec);
		tmp = ioread32(reg);
	}

	return tmp;
}
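
/* Example: waiting for a controller-ready bit to clear.  Because the
 * loop runs while (value & mask) == val, passing the busy bit as both
 * mask and val waits for it to drop.  A sketch, where "mmio +
 * EXAMPLE_CTL" and EXAMPLE_BUSY are hypothetical LLD register/bit names;
 * poll every 10ms, give up after 1s:
 *
 *	u32 tmp;
 *
 *	tmp = ata_wait_register(mmio + EXAMPLE_CTL,
 *				EXAMPLE_BUSY, EXAMPLE_BUSY, 10, 1000);
 *	if (tmp & EXAMPLE_BUSY)
 *		return -EBUSY;	// still busy at timeout
 */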

/*
 * Dummy port_ops
 */
static void ata_dummy_noret(struct ata_port *ap)	{ }
static int ata_dummy_ret0(struct ata_port *ap)		{ return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}

const struct ata_port_operations ata_dummy_port_ops = {
	.port_disable		= ata_port_disable,
	.check_status		= ata_dummy_check_status,
	.check_altstatus	= ata_dummy_check_status,
	.dev_select		= ata_noop_dev_select,
	.qc_prep		= ata_noop_qc_prep,
	.qc_issue		= ata_dummy_qc_issue,
	.freeze			= ata_dummy_noret,
	.thaw			= ata_dummy_noret,
	.error_handler		= ata_dummy_noret,
	.post_internal_cmd	= ata_dummy_qc_noret,
	.irq_clear		= ata_dummy_noret,
	.port_start		= ata_dummy_ret0,
	.port_stop		= ata_dummy_noret,
};

const struct ata_port_info ata_dummy_port_info = {
	.port_ops		= &ata_dummy_port_ops,
};
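
/* Example: ata_dummy_port_info lets an LLD mark a nonexistent or broken
 * channel while keeping port numbering intact; the dummy qc_issue()
 * above fails everything with AC_ERR_SYSTEM.  A sketch for a controller
 * whose second channel is absent ("example_port_info" is hypothetical):
 *
 *	const struct ata_port_info *ppi[] = {
 *		&example_port_info,
 *		&ata_dummy_port_info,	// port 1 is a dummy
 *	};
 *
 *	host = ata_host_alloc_pinfo(&pdev->dev, ppi, 2);
 */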

/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_phy_debounce);
EXPORT_SYMBOL_GPL(sata_phy_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_port_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_port_online);
EXPORT_SYMBOL_GPL(ata_port_offline);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_device_blacklisted);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_native_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_native_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dummy_irq_on);
EXPORT_SYMBOL_GPL(ata_irq_ack);
EXPORT_SYMBOL_GPL(ata_dummy_irq_ack);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);