]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
libata-pmp: update ata_eh_reset() for PMP
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
1da177e4
LT
33 */
34
1da177e4
LT
35#include <linux/kernel.h>
36#include <linux/module.h>
37#include <linux/pci.h>
38#include <linux/init.h>
39#include <linux/list.h>
40#include <linux/mm.h>
41#include <linux/highmem.h>
42#include <linux/spinlock.h>
43#include <linux/blkdev.h>
44#include <linux/delay.h>
45#include <linux/timer.h>
46#include <linux/interrupt.h>
47#include <linux/completion.h>
48#include <linux/suspend.h>
49#include <linux/workqueue.h>
67846b30 50#include <linux/jiffies.h>
378f058c 51#include <linux/scatterlist.h>
1da177e4 52#include <scsi/scsi.h>
193515d5 53#include <scsi/scsi_cmnd.h>
1da177e4
LT
54#include <scsi/scsi_host.h>
55#include <linux/libata.h>
56#include <asm/io.h>
57#include <asm/semaphore.h>
58#include <asm/byteorder.h>
59
60#include "libata.h"
61
fda0efc5 62
d7bb4cc7 63/* debounce timing parameters in msecs { interval, duration, timeout } */
e9c83914
TH
64const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
65const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
66const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };
d7bb4cc7 67
3373efd8
TH
68static unsigned int ata_dev_init_params(struct ata_device *dev,
69 u16 heads, u16 sectors);
70static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
9f45cbd3 71static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable);
3373efd8 72static void ata_dev_xfermask(struct ata_device *dev);
75683fe7 73static unsigned long ata_dev_blacklisted(const struct ata_device *dev);
1da177e4 74
f3187195 75unsigned int ata_print_id = 1;
1da177e4
LT
76static struct workqueue_struct *ata_wq;
77
453b07ac
TH
78struct workqueue_struct *ata_aux_wq;
79
418dc1f5 80int atapi_enabled = 1;
1623c81e
JG
81module_param(atapi_enabled, int, 0444);
82MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");
83
95de719a
AL
84int atapi_dmadir = 0;
85module_param(atapi_dmadir, int, 0444);
86MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");
87
baf4fdfa
ML
88int atapi_passthru16 = 1;
89module_param(atapi_passthru16, int, 0444);
90MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");
91
c3c013a2
JG
92int libata_fua = 0;
93module_param_named(fua, libata_fua, int, 0444);
94MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");
95
1e999736
AC
96static int ata_ignore_hpa = 0;
97module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
98MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");
99
a8601e5f
AM
100static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
101module_param(ata_probe_timeout, int, 0444);
102MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");
103
d7d0dad6
JG
104int libata_noacpi = 1;
105module_param_named(noacpi, libata_noacpi, int, 0444);
11ef697b
KCA
106MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in suspend/resume when set");
107
1da177e4
LT
108MODULE_AUTHOR("Jeff Garzik");
109MODULE_DESCRIPTION("Library module for ATA devices");
110MODULE_LICENSE("GPL");
111MODULE_VERSION(DRV_VERSION);
112
0baab86b 113
1da177e4
LT
114/**
115 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
116 * @tf: Taskfile to convert
1da177e4 117 * @pmp: Port multiplier port
9977126c
TH
118 * @is_cmd: This FIS is for command
119 * @fis: Buffer into which data will output
1da177e4
LT
120 *
121 * Converts a standard ATA taskfile to a Serial ATA
122 * FIS structure (Register - Host to Device).
123 *
124 * LOCKING:
125 * Inherited from caller.
126 */
9977126c 127void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 128{
9977126c
TH
129 fis[0] = 0x27; /* Register - Host to Device FIS */
130 fis[1] = pmp & 0xf; /* Port multiplier number*/
131 if (is_cmd)
132 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
133
1da177e4
LT
134 fis[2] = tf->command;
135 fis[3] = tf->feature;
136
137 fis[4] = tf->lbal;
138 fis[5] = tf->lbam;
139 fis[6] = tf->lbah;
140 fis[7] = tf->device;
141
142 fis[8] = tf->hob_lbal;
143 fis[9] = tf->hob_lbam;
144 fis[10] = tf->hob_lbah;
145 fis[11] = tf->hob_feature;
146
147 fis[12] = tf->nsect;
148 fis[13] = tf->hob_nsect;
149 fis[14] = 0;
150 fis[15] = tf->ctl;
151
152 fis[16] = 0;
153 fis[17] = 0;
154 fis[18] = 0;
155 fis[19] = 0;
156}
157
158/**
159 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
160 * @fis: Buffer from which data will be input
161 * @tf: Taskfile to output
162 *
e12a1be6 163 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
164 *
165 * LOCKING:
166 * Inherited from caller.
167 */
168
057ace5e 169void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
170{
171 tf->command = fis[2]; /* status */
172 tf->feature = fis[3]; /* error */
173
174 tf->lbal = fis[4];
175 tf->lbam = fis[5];
176 tf->lbah = fis[6];
177 tf->device = fis[7];
178
179 tf->hob_lbal = fis[8];
180 tf->hob_lbam = fis[9];
181 tf->hob_lbah = fis[10];
182
183 tf->nsect = fis[12];
184 tf->hob_nsect = fis[13];
185}
186
8cbd6df1
AL
187static const u8 ata_rw_cmds[] = {
188 /* pio multi */
189 ATA_CMD_READ_MULTI,
190 ATA_CMD_WRITE_MULTI,
191 ATA_CMD_READ_MULTI_EXT,
192 ATA_CMD_WRITE_MULTI_EXT,
9a3dccc4
TH
193 0,
194 0,
195 0,
196 ATA_CMD_WRITE_MULTI_FUA_EXT,
8cbd6df1
AL
197 /* pio */
198 ATA_CMD_PIO_READ,
199 ATA_CMD_PIO_WRITE,
200 ATA_CMD_PIO_READ_EXT,
201 ATA_CMD_PIO_WRITE_EXT,
9a3dccc4
TH
202 0,
203 0,
204 0,
205 0,
8cbd6df1
AL
206 /* dma */
207 ATA_CMD_READ,
208 ATA_CMD_WRITE,
209 ATA_CMD_READ_EXT,
9a3dccc4
TH
210 ATA_CMD_WRITE_EXT,
211 0,
212 0,
213 0,
214 ATA_CMD_WRITE_FUA_EXT
8cbd6df1 215};
1da177e4
LT
216
217/**
8cbd6df1 218 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
219 * @tf: command to examine and configure
220 * @dev: device tf belongs to
1da177e4 221 *
2e9edbf8 222 * Examine the device configuration and tf->flags to calculate
8cbd6df1 223 * the proper read/write commands and protocol to use.
1da177e4
LT
224 *
225 * LOCKING:
226 * caller.
227 */
bd056d7e 228static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 229{
9a3dccc4 230 u8 cmd;
1da177e4 231
9a3dccc4 232 int index, fua, lba48, write;
2e9edbf8 233
9a3dccc4 234 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
235 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
236 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 237
8cbd6df1
AL
238 if (dev->flags & ATA_DFLAG_PIO) {
239 tf->protocol = ATA_PROT_PIO;
9a3dccc4 240 index = dev->multi_count ? 0 : 8;
9af5c9c9 241 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
242 /* Unable to use DMA due to host limitation */
243 tf->protocol = ATA_PROT_PIO;
0565c26d 244 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
245 } else {
246 tf->protocol = ATA_PROT_DMA;
9a3dccc4 247 index = 16;
8cbd6df1 248 }
1da177e4 249
9a3dccc4
TH
250 cmd = ata_rw_cmds[index + fua + lba48 + write];
251 if (cmd) {
252 tf->command = cmd;
253 return 0;
254 }
255 return -1;
1da177e4
LT
256}
257
35b649fe
TH
258/**
259 * ata_tf_read_block - Read block address from ATA taskfile
260 * @tf: ATA taskfile of interest
261 * @dev: ATA device @tf belongs to
262 *
263 * LOCKING:
264 * None.
265 *
266 * Read block address from @tf. This function can handle all
267 * three address formats - LBA, LBA48 and CHS. tf->protocol and
268 * flags select the address format to use.
269 *
270 * RETURNS:
271 * Block address read from @tf.
272 */
273u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
274{
275 u64 block = 0;
276
277 if (tf->flags & ATA_TFLAG_LBA) {
278 if (tf->flags & ATA_TFLAG_LBA48) {
279 block |= (u64)tf->hob_lbah << 40;
280 block |= (u64)tf->hob_lbam << 32;
281 block |= tf->hob_lbal << 24;
282 } else
283 block |= (tf->device & 0xf) << 24;
284
285 block |= tf->lbah << 16;
286 block |= tf->lbam << 8;
287 block |= tf->lbal;
288 } else {
289 u32 cyl, head, sect;
290
291 cyl = tf->lbam | (tf->lbah << 8);
292 head = tf->device & 0xf;
293 sect = tf->lbal;
294
295 block = (cyl * dev->heads + head) * dev->sectors + sect;
296 }
297
298 return block;
299}
300
bd056d7e
TH
301/**
302 * ata_build_rw_tf - Build ATA taskfile for given read/write request
303 * @tf: Target ATA taskfile
304 * @dev: ATA device @tf belongs to
305 * @block: Block address
306 * @n_block: Number of blocks
307 * @tf_flags: RW/FUA etc...
308 * @tag: tag
309 *
310 * LOCKING:
311 * None.
312 *
313 * Build ATA taskfile @tf for read/write request described by
314 * @block, @n_block, @tf_flags and @tag on @dev.
315 *
316 * RETURNS:
317 *
318 * 0 on success, -ERANGE if the request is too large for @dev,
319 * -EINVAL if the request is invalid.
320 */
321int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
322 u64 block, u32 n_block, unsigned int tf_flags,
323 unsigned int tag)
324{
325 tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
326 tf->flags |= tf_flags;
327
6d1245bf 328 if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
bd056d7e
TH
329 /* yay, NCQ */
330 if (!lba_48_ok(block, n_block))
331 return -ERANGE;
332
333 tf->protocol = ATA_PROT_NCQ;
334 tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;
335
336 if (tf->flags & ATA_TFLAG_WRITE)
337 tf->command = ATA_CMD_FPDMA_WRITE;
338 else
339 tf->command = ATA_CMD_FPDMA_READ;
340
341 tf->nsect = tag << 3;
342 tf->hob_feature = (n_block >> 8) & 0xff;
343 tf->feature = n_block & 0xff;
344
345 tf->hob_lbah = (block >> 40) & 0xff;
346 tf->hob_lbam = (block >> 32) & 0xff;
347 tf->hob_lbal = (block >> 24) & 0xff;
348 tf->lbah = (block >> 16) & 0xff;
349 tf->lbam = (block >> 8) & 0xff;
350 tf->lbal = block & 0xff;
351
352 tf->device = 1 << 6;
353 if (tf->flags & ATA_TFLAG_FUA)
354 tf->device |= 1 << 7;
355 } else if (dev->flags & ATA_DFLAG_LBA) {
356 tf->flags |= ATA_TFLAG_LBA;
357
358 if (lba_28_ok(block, n_block)) {
359 /* use LBA28 */
360 tf->device |= (block >> 24) & 0xf;
361 } else if (lba_48_ok(block, n_block)) {
362 if (!(dev->flags & ATA_DFLAG_LBA48))
363 return -ERANGE;
364
365 /* use LBA48 */
366 tf->flags |= ATA_TFLAG_LBA48;
367
368 tf->hob_nsect = (n_block >> 8) & 0xff;
369
370 tf->hob_lbah = (block >> 40) & 0xff;
371 tf->hob_lbam = (block >> 32) & 0xff;
372 tf->hob_lbal = (block >> 24) & 0xff;
373 } else
374 /* request too large even for LBA48 */
375 return -ERANGE;
376
377 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
378 return -EINVAL;
379
380 tf->nsect = n_block & 0xff;
381
382 tf->lbah = (block >> 16) & 0xff;
383 tf->lbam = (block >> 8) & 0xff;
384 tf->lbal = block & 0xff;
385
386 tf->device |= ATA_LBA;
387 } else {
388 /* CHS */
389 u32 sect, head, cyl, track;
390
391 /* The request -may- be too large for CHS addressing. */
392 if (!lba_28_ok(block, n_block))
393 return -ERANGE;
394
395 if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
396 return -EINVAL;
397
398 /* Convert LBA to CHS */
399 track = (u32)block / dev->sectors;
400 cyl = track / dev->heads;
401 head = track % dev->heads;
402 sect = (u32)block % dev->sectors + 1;
403
404 DPRINTK("block %u track %u cyl %u head %u sect %u\n",
405 (u32)block, track, cyl, head, sect);
406
407 /* Check whether the converted CHS can fit.
408 Cylinder: 0-65535
409 Head: 0-15
410 Sector: 1-255*/
411 if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
412 return -ERANGE;
413
414 tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
415 tf->lbal = sect;
416 tf->lbam = cyl;
417 tf->lbah = cyl >> 8;
418 tf->device |= head;
419 }
420
421 return 0;
422}
423
cb95d562
TH
424/**
425 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
426 * @pio_mask: pio_mask
427 * @mwdma_mask: mwdma_mask
428 * @udma_mask: udma_mask
429 *
430 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
431 * unsigned int xfer_mask.
432 *
433 * LOCKING:
434 * None.
435 *
436 * RETURNS:
437 * Packed xfer_mask.
438 */
439static unsigned int ata_pack_xfermask(unsigned int pio_mask,
440 unsigned int mwdma_mask,
441 unsigned int udma_mask)
442{
443 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
444 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
445 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
446}
447
c0489e4e
TH
448/**
449 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
450 * @xfer_mask: xfer_mask to unpack
451 * @pio_mask: resulting pio_mask
452 * @mwdma_mask: resulting mwdma_mask
453 * @udma_mask: resulting udma_mask
454 *
455 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
456 * Any NULL distination masks will be ignored.
457 */
458static void ata_unpack_xfermask(unsigned int xfer_mask,
459 unsigned int *pio_mask,
460 unsigned int *mwdma_mask,
461 unsigned int *udma_mask)
462{
463 if (pio_mask)
464 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
465 if (mwdma_mask)
466 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
467 if (udma_mask)
468 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
469}
470
cb95d562 471static const struct ata_xfer_ent {
be9a50c8 472 int shift, bits;
cb95d562
TH
473 u8 base;
474} ata_xfer_tbl[] = {
475 { ATA_SHIFT_PIO, ATA_BITS_PIO, XFER_PIO_0 },
476 { ATA_SHIFT_MWDMA, ATA_BITS_MWDMA, XFER_MW_DMA_0 },
477 { ATA_SHIFT_UDMA, ATA_BITS_UDMA, XFER_UDMA_0 },
478 { -1, },
479};
480
481/**
482 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
483 * @xfer_mask: xfer_mask of interest
484 *
485 * Return matching XFER_* value for @xfer_mask. Only the highest
486 * bit of @xfer_mask is considered.
487 *
488 * LOCKING:
489 * None.
490 *
491 * RETURNS:
492 * Matching XFER_* value, 0 if no match found.
493 */
494static u8 ata_xfer_mask2mode(unsigned int xfer_mask)
495{
496 int highbit = fls(xfer_mask) - 1;
497 const struct ata_xfer_ent *ent;
498
499 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
500 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
501 return ent->base + highbit - ent->shift;
502 return 0;
503}
504
505/**
506 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
507 * @xfer_mode: XFER_* of interest
508 *
509 * Return matching xfer_mask for @xfer_mode.
510 *
511 * LOCKING:
512 * None.
513 *
514 * RETURNS:
515 * Matching xfer_mask, 0 if no match found.
516 */
517static unsigned int ata_xfer_mode2mask(u8 xfer_mode)
518{
519 const struct ata_xfer_ent *ent;
520
521 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
522 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
523 return 1 << (ent->shift + xfer_mode - ent->base);
524 return 0;
525}
526
527/**
528 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
529 * @xfer_mode: XFER_* of interest
530 *
531 * Return matching xfer_shift for @xfer_mode.
532 *
533 * LOCKING:
534 * None.
535 *
536 * RETURNS:
537 * Matching xfer_shift, -1 if no match found.
538 */
539static int ata_xfer_mode2shift(unsigned int xfer_mode)
540{
541 const struct ata_xfer_ent *ent;
542
543 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
544 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
545 return ent->shift;
546 return -1;
547}
548
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
static const char *ata_mode_string(unsigned int xfer_mask)
{
	/* indexed by bit position in the packed xfer mask */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int top = fls(xfer_mask) - 1;

	if (top >= 0 && top < ARRAY_SIZE(xfer_mode_str))
		return xfer_mode_str[top];

	return "<n/a>";
}
594
4c360c81
TH
/* map a SATA link speed number (1-based) to a human-readable string */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd < 1 || spd > ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[spd - 1];
}
606
3373efd8 607void ata_dev_disable(struct ata_device *dev)
0b8efb0a 608{
09d7f9b0 609 if (ata_dev_enabled(dev)) {
9af5c9c9 610 if (ata_msg_drv(dev->link->ap))
09d7f9b0 611 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
4ae72a1e
TH
612 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
613 ATA_DNXFER_QUIET);
0b8efb0a
TH
614 dev->class++;
615 }
616}
617
1da177e4 618/**
0d5ff566 619 * ata_devchk - PATA device presence detection
1da177e4
LT
620 * @ap: ATA channel to examine
621 * @device: Device to examine (starting at zero)
622 *
623 * This technique was originally described in
624 * Hale Landis's ATADRVR (www.ata-atapi.com), and
625 * later found its way into the ATA/ATAPI spec.
626 *
627 * Write a pattern to the ATA shadow registers,
628 * and if a device is present, it will respond by
629 * correctly storing and echoing back the
630 * ATA shadow register contents.
631 *
632 * LOCKING:
633 * caller.
634 */
635
0d5ff566 636static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
1da177e4
LT
637{
638 struct ata_ioports *ioaddr = &ap->ioaddr;
639 u8 nsect, lbal;
640
641 ap->ops->dev_select(ap, device);
642
0d5ff566
TH
643 iowrite8(0x55, ioaddr->nsect_addr);
644 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 645
0d5ff566
TH
646 iowrite8(0xaa, ioaddr->nsect_addr);
647 iowrite8(0x55, ioaddr->lbal_addr);
1da177e4 648
0d5ff566
TH
649 iowrite8(0x55, ioaddr->nsect_addr);
650 iowrite8(0xaa, ioaddr->lbal_addr);
1da177e4 651
0d5ff566
TH
652 nsect = ioread8(ioaddr->nsect_addr);
653 lbal = ioread8(ioaddr->lbal_addr);
1da177e4
LT
654
655 if ((nsect == 0x55) && (lbal == 0xaa))
656 return 1; /* we found a device */
657
658 return 0; /* nothing found */
659}
660
1da177e4
LT
661/**
662 * ata_dev_classify - determine device type based on ATA-spec signature
663 * @tf: ATA taskfile register set for device to be identified
664 *
665 * Determine from taskfile register contents whether a device is
666 * ATA or ATAPI, as per "Signature and persistence" section
667 * of ATA/PI spec (volume 1, sect 5.14).
668 *
669 * LOCKING:
670 * None.
671 *
672 * RETURNS:
673 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
674 * the event of failure.
675 */
676
057ace5e 677unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
678{
679 /* Apple's open source Darwin code hints that some devices only
680 * put a proper signature into the LBA mid/high registers,
681 * So, we only check those. It's sufficient for uniqueness.
682 */
683
684 if (((tf->lbam == 0) && (tf->lbah == 0)) ||
685 ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
686 DPRINTK("found ATA device by sig\n");
687 return ATA_DEV_ATA;
688 }
689
690 if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
691 ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
692 DPRINTK("found ATAPI device by sig\n");
693 return ATA_DEV_ATAPI;
694 }
695
696 DPRINTK("unknown device\n");
697 return ATA_DEV_UNKNOWN;
698}
699
700/**
701 * ata_dev_try_classify - Parse returned ATA device signature
3f19859e
TH
702 * @dev: ATA device to classify (starting at zero)
703 * @present: device seems present
b4dc7623 704 * @r_err: Value of error register on completion
1da177e4
LT
705 *
706 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
707 * an ATA/ATAPI-defined set of values is placed in the ATA
708 * shadow registers, indicating the results of device detection
709 * and diagnostics.
710 *
711 * Select the ATA device, and read the values from the ATA shadow
712 * registers. Then parse according to the Error register value,
713 * and the spec-defined values examined by ata_dev_classify().
714 *
715 * LOCKING:
716 * caller.
b4dc7623
TH
717 *
718 * RETURNS:
719 * Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
1da177e4 720 */
3f19859e
TH
721unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
722 u8 *r_err)
1da177e4 723{
3f19859e 724 struct ata_port *ap = dev->link->ap;
1da177e4
LT
725 struct ata_taskfile tf;
726 unsigned int class;
727 u8 err;
728
3f19859e 729 ap->ops->dev_select(ap, dev->devno);
1da177e4
LT
730
731 memset(&tf, 0, sizeof(tf));
732
1da177e4 733 ap->ops->tf_read(ap, &tf);
0169e284 734 err = tf.feature;
b4dc7623
TH
735 if (r_err)
736 *r_err = err;
1da177e4 737
93590859 738 /* see if device passed diags: if master then continue and warn later */
3f19859e 739 if (err == 0 && dev->devno == 0)
93590859 740 /* diagnostic fail : do nothing _YET_ */
3f19859e 741 dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
93590859 742 else if (err == 1)
1da177e4 743 /* do nothing */ ;
3f19859e 744 else if ((dev->devno == 0) && (err == 0x81))
1da177e4
LT
745 /* do nothing */ ;
746 else
b4dc7623 747 return ATA_DEV_NONE;
1da177e4 748
b4dc7623 749 /* determine if device is ATA or ATAPI */
1da177e4 750 class = ata_dev_classify(&tf);
b4dc7623 751
d7fbee05
TH
752 if (class == ATA_DEV_UNKNOWN) {
753 /* If the device failed diagnostic, it's likely to
754 * have reported incorrect device signature too.
755 * Assume ATA device if the device seems present but
756 * device signature is invalid with diagnostic
757 * failure.
758 */
759 if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
760 class = ATA_DEV_ATA;
761 else
762 class = ATA_DEV_NONE;
763 } else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
764 class = ATA_DEV_NONE;
765
b4dc7623 766 return class;
1da177e4
LT
767}
768
769/**
6a62a04d 770 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
771 * @id: IDENTIFY DEVICE results we will examine
772 * @s: string into which data is output
773 * @ofs: offset into identify device page
774 * @len: length of string to return. must be an even number.
775 *
776 * The strings in the IDENTIFY DEVICE page are broken up into
777 * 16-bit chunks. Run through the string, and output each
778 * 8-bit chunk linearly, regardless of platform.
779 *
780 * LOCKING:
781 * caller.
782 */
783
6a62a04d
TH
784void ata_id_string(const u16 *id, unsigned char *s,
785 unsigned int ofs, unsigned int len)
1da177e4
LT
786{
787 unsigned int c;
788
789 while (len > 0) {
790 c = id[ofs] >> 8;
791 *s = c;
792 s++;
793
794 c = id[ofs] & 0xff;
795 *s = c;
796 s++;
797
798 ofs++;
799 len -= 2;
800 }
801}
802
0e949ff3 803/**
6a62a04d 804 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
805 * @id: IDENTIFY DEVICE results we will examine
806 * @s: string into which data is output
807 * @ofs: offset into identify device page
808 * @len: length of string to return. must be an odd number.
809 *
6a62a04d 810 * This function is identical to ata_id_string except that it
0e949ff3
TH
811 * trims trailing spaces and terminates the resulting string with
812 * null. @len must be actual maximum length (even number) + 1.
813 *
814 * LOCKING:
815 * caller.
816 */
6a62a04d
TH
817void ata_id_c_string(const u16 *id, unsigned char *s,
818 unsigned int ofs, unsigned int len)
0e949ff3
TH
819{
820 unsigned char *p;
821
822 WARN_ON(!(len & 1));
823
6a62a04d 824 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
825
826 p = s + strnlen(s, len - 1);
827 while (p > s && p[-1] == ' ')
828 p--;
829 *p = '\0';
830}
0baab86b 831
db6f8759
TH
832static u64 ata_id_n_sectors(const u16 *id)
833{
834 if (ata_id_has_lba(id)) {
835 if (ata_id_has_lba48(id))
836 return ata_id_u64(id, 100);
837 else
838 return ata_id_u32(id, 60);
839 } else {
840 if (ata_id_current_chs_valid(id))
841 return ata_id_u32(id, 57);
842 else
843 return id[1] * id[3] * id[6];
844 }
845}
846
1e999736
AC
847static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
848{
849 u64 sectors = 0;
850
851 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
852 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
853 sectors |= (tf->hob_lbal & 0xff) << 24;
854 sectors |= (tf->lbah & 0xff) << 16;
855 sectors |= (tf->lbam & 0xff) << 8;
856 sectors |= (tf->lbal & 0xff);
857
858 return ++sectors;
859}
860
861static u64 ata_tf_to_lba(struct ata_taskfile *tf)
862{
863 u64 sectors = 0;
864
865 sectors |= (tf->device & 0x0f) << 24;
866 sectors |= (tf->lbah & 0xff) << 16;
867 sectors |= (tf->lbam & 0xff) << 8;
868 sectors |= (tf->lbal & 0xff);
869
870 return ++sectors;
871}
872
873/**
c728a914
TH
874 * ata_read_native_max_address - Read native max address
875 * @dev: target device
876 * @max_sectors: out parameter for the result native max address
1e999736 877 *
c728a914
TH
878 * Perform an LBA48 or LBA28 native size query upon the device in
879 * question.
1e999736 880 *
c728a914
TH
881 * RETURNS:
882 * 0 on success, -EACCES if command is aborted by the drive.
883 * -EIO on other errors.
1e999736 884 */
c728a914 885static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
1e999736 886{
c728a914 887 unsigned int err_mask;
1e999736 888 struct ata_taskfile tf;
c728a914 889 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
890
891 ata_tf_init(dev, &tf);
892
c728a914 893 /* always clear all address registers */
1e999736 894 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
1e999736 895
c728a914
TH
896 if (lba48) {
897 tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
898 tf.flags |= ATA_TFLAG_LBA48;
899 } else
900 tf.command = ATA_CMD_READ_NATIVE_MAX;
1e999736 901
1e999736 902 tf.protocol |= ATA_PROT_NODATA;
c728a914
TH
903 tf.device |= ATA_LBA;
904
905 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
906 if (err_mask) {
907 ata_dev_printk(dev, KERN_WARNING, "failed to read native "
908 "max address (err_mask=0x%x)\n", err_mask);
909 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
910 return -EACCES;
911 return -EIO;
912 }
1e999736 913
c728a914
TH
914 if (lba48)
915 *max_sectors = ata_tf_to_lba48(&tf);
916 else
917 *max_sectors = ata_tf_to_lba(&tf);
93328e11
AC
918 if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
919 (*max_sectors)--;
c728a914 920 return 0;
1e999736
AC
921}
922
923/**
c728a914
TH
924 * ata_set_max_sectors - Set max sectors
925 * @dev: target device
6b38d1d1 926 * @new_sectors: new max sectors value to set for the device
1e999736 927 *
c728a914
TH
928 * Set max sectors of @dev to @new_sectors.
929 *
930 * RETURNS:
931 * 0 on success, -EACCES if command is aborted or denied (due to
932 * previous non-volatile SET_MAX) by the drive. -EIO on other
933 * errors.
1e999736 934 */
05027adc 935static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
1e999736 936{
c728a914 937 unsigned int err_mask;
1e999736 938 struct ata_taskfile tf;
c728a914 939 int lba48 = ata_id_has_lba48(dev->id);
1e999736
AC
940
941 new_sectors--;
942
943 ata_tf_init(dev, &tf);
944
1e999736 945 tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;
c728a914
TH
946
947 if (lba48) {
948 tf.command = ATA_CMD_SET_MAX_EXT;
949 tf.flags |= ATA_TFLAG_LBA48;
950
951 tf.hob_lbal = (new_sectors >> 24) & 0xff;
952 tf.hob_lbam = (new_sectors >> 32) & 0xff;
953 tf.hob_lbah = (new_sectors >> 40) & 0xff;
1e582ba4 954 } else {
c728a914
TH
955 tf.command = ATA_CMD_SET_MAX;
956
1e582ba4
TH
957 tf.device |= (new_sectors >> 24) & 0xf;
958 }
959
1e999736 960 tf.protocol |= ATA_PROT_NODATA;
c728a914 961 tf.device |= ATA_LBA;
1e999736
AC
962
963 tf.lbal = (new_sectors >> 0) & 0xff;
964 tf.lbam = (new_sectors >> 8) & 0xff;
965 tf.lbah = (new_sectors >> 16) & 0xff;
1e999736 966
c728a914
TH
967 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
968 if (err_mask) {
969 ata_dev_printk(dev, KERN_WARNING, "failed to set "
970 "max address (err_mask=0x%x)\n", err_mask);
971 if (err_mask == AC_ERR_DEV &&
972 (tf.feature & (ATA_ABORTED | ATA_IDNF)))
973 return -EACCES;
974 return -EIO;
975 }
976
c728a914 977 return 0;
1e999736
AC
978}
979
980/**
981 * ata_hpa_resize - Resize a device with an HPA set
982 * @dev: Device to resize
983 *
984 * Read the size of an LBA28 or LBA48 disk with HPA features and resize
985 * it if required to the full size of the media. The caller must check
986 * the drive has the HPA feature set enabled.
05027adc
TH
987 *
988 * RETURNS:
989 * 0 on success, -errno on failure.
1e999736 990 */
05027adc 991static int ata_hpa_resize(struct ata_device *dev)
1e999736 992{
05027adc
TH
993 struct ata_eh_context *ehc = &dev->link->eh_context;
994 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
995 u64 sectors = ata_id_n_sectors(dev->id);
996 u64 native_sectors;
c728a914 997 int rc;
a617c09f 998
05027adc
TH
999 /* do we need to do it? */
1000 if (dev->class != ATA_DEV_ATA ||
1001 !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
1002 (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
c728a914 1003 return 0;
1e999736 1004
05027adc
TH
1005 /* read native max address */
1006 rc = ata_read_native_max_address(dev, &native_sectors);
1007 if (rc) {
1008 /* If HPA isn't going to be unlocked, skip HPA
1009 * resizing from the next try.
1010 */
1011 if (!ata_ignore_hpa) {
1012 ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
1013 "broken, will skip HPA handling\n");
1014 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1015
1016 /* we can continue if device aborted the command */
1017 if (rc == -EACCES)
1018 rc = 0;
1e999736 1019 }
37301a55 1020
05027adc
TH
1021 return rc;
1022 }
1023
1024 /* nothing to do? */
1025 if (native_sectors <= sectors || !ata_ignore_hpa) {
1026 if (!print_info || native_sectors == sectors)
1027 return 0;
1028
1029 if (native_sectors > sectors)
1030 ata_dev_printk(dev, KERN_INFO,
1031 "HPA detected: current %llu, native %llu\n",
1032 (unsigned long long)sectors,
1033 (unsigned long long)native_sectors);
1034 else if (native_sectors < sectors)
1035 ata_dev_printk(dev, KERN_WARNING,
1036 "native sectors (%llu) is smaller than "
1037 "sectors (%llu)\n",
1038 (unsigned long long)native_sectors,
1039 (unsigned long long)sectors);
1040 return 0;
1041 }
1042
1043 /* let's unlock HPA */
1044 rc = ata_set_max_sectors(dev, native_sectors);
1045 if (rc == -EACCES) {
1046 /* if device aborted the command, skip HPA resizing */
1047 ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
1048 "(%llu -> %llu), skipping HPA handling\n",
1049 (unsigned long long)sectors,
1050 (unsigned long long)native_sectors);
1051 dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
1052 return 0;
1053 } else if (rc)
1054 return rc;
1055
1056 /* re-read IDENTIFY data */
1057 rc = ata_dev_reread_id(dev, 0);
1058 if (rc) {
1059 ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
1060 "data after HPA resizing\n");
1061 return rc;
1062 }
1063
1064 if (print_info) {
1065 u64 new_sectors = ata_id_n_sectors(dev->id);
1066 ata_dev_printk(dev, KERN_INFO,
1067 "HPA unlocked: %llu -> %llu, native %llu\n",
1068 (unsigned long long)sectors,
1069 (unsigned long long)new_sectors,
1070 (unsigned long long)native_sectors);
1071 }
1072
1073 return 0;
1e999736
AC
1074}
1075
10305f0f
AC
1076/**
1077 * ata_id_to_dma_mode - Identify DMA mode from id block
1078 * @dev: device to identify
cc261267 1079 * @unknown: mode to assume if we cannot tell
10305f0f
AC
1080 *
1081 * Set up the timing values for the device based upon the identify
1082 * reported values for the DMA mode. This function is used by drivers
1083 * which rely upon firmware configured modes, but wish to report the
1084 * mode correctly when possible.
1085 *
1086 * In addition we emit similarly formatted messages to the default
1087 * ata_dev_set_mode handler, in order to provide consistency of
1088 * presentation.
1089 */
1090
1091void ata_id_to_dma_mode(struct ata_device *dev, u8 unknown)
1092{
1093 unsigned int mask;
1094 u8 mode;
1095
1096 /* Pack the DMA modes */
1097 mask = ((dev->id[63] >> 8) << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA;
1098 if (dev->id[53] & 0x04)
1099 mask |= ((dev->id[88] >> 8) << ATA_SHIFT_UDMA) & ATA_MASK_UDMA;
1100
1101 /* Select the mode in use */
1102 mode = ata_xfer_mask2mode(mask);
1103
1104 if (mode != 0) {
1105 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
1106 ata_mode_string(mask));
1107 } else {
1108 /* SWDMA perhaps ? */
1109 mode = unknown;
1110 ata_dev_printk(dev, KERN_INFO, "configured for DMA\n");
1111 }
1112
1113 /* Configure the device reporting */
1114 dev->xfer_mode = mode;
1115 dev->xfer_shift = ata_xfer_mode2shift(mode);
1116}
1117
0baab86b
EF
1118/**
1119 * ata_noop_dev_select - Select device 0/1 on ATA bus
1120 * @ap: ATA channel to manipulate
1121 * @device: ATA device (numbered from zero) to select
1122 *
1123 * This function performs no actual function.
1124 *
1125 * May be used as the dev_select() entry in ata_port_operations.
1126 *
1127 * LOCKING:
1128 * caller.
1129 */
1da177e4
LT
void ata_noop_dev_select (struct ata_port *ap, unsigned int device)
{
	/* Intentionally empty: serves as the ->dev_select() hook for
	 * controllers that need no explicit device-selection step.
	 */
}
1133
0baab86b 1134
1da177e4
LT
1135/**
1136 * ata_std_dev_select - Select device 0/1 on ATA bus
1137 * @ap: ATA channel to manipulate
1138 * @device: ATA device (numbered from zero) to select
1139 *
1140 * Use the method defined in the ATA specification to
1141 * make either device 0, or device 1, active on the
0baab86b
EF
1142 * ATA channel. Works with both PIO and MMIO.
1143 *
1144 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1145 *
1146 * LOCKING:
1147 * caller.
1148 */
1149
1150void ata_std_dev_select (struct ata_port *ap, unsigned int device)
1151{
1152 u8 tmp;
1153
1154 if (device == 0)
1155 tmp = ATA_DEVICE_OBS;
1156 else
1157 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1158
0d5ff566 1159 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1160 ata_pause(ap); /* needed; also flushes, for mmio */
1161}
1162
1163/**
1164 * ata_dev_select - Select device 0/1 on ATA bus
1165 * @ap: ATA channel to manipulate
1166 * @device: ATA device (numbered from zero) to select
1167 * @wait: non-zero to wait for Status register BSY bit to clear
1168 * @can_sleep: non-zero if context allows sleeping
1169 *
1170 * Use the method defined in the ATA specification to
1171 * make either device 0, or device 1, active on the
1172 * ATA channel.
1173 *
1174 * This is a high-level version of ata_std_dev_select(),
1175 * which additionally provides the services of inserting
1176 * the proper pauses and status polling, where needed.
1177 *
1178 * LOCKING:
1179 * caller.
1180 */
1181
1182void ata_dev_select(struct ata_port *ap, unsigned int device,
1183 unsigned int wait, unsigned int can_sleep)
1184{
88574551 1185 if (ata_msg_probe(ap))
44877b4e
TH
1186 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1187 "device %u, wait %u\n", device, wait);
1da177e4
LT
1188
1189 if (wait)
1190 ata_wait_idle(ap);
1191
1192 ap->ops->dev_select(ap, device);
1193
1194 if (wait) {
9af5c9c9 1195 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1196 msleep(150);
1197 ata_wait_idle(ap);
1198 }
1199}
1200
1201/**
1202 * ata_dump_id - IDENTIFY DEVICE info debugging output
0bd3300a 1203 * @id: IDENTIFY DEVICE page to dump
1da177e4 1204 *
0bd3300a
TH
1205 * Dump selected 16-bit words from the given IDENTIFY DEVICE
1206 * page.
1da177e4
LT
1207 *
1208 * LOCKING:
1209 * caller.
1210 */
1211
static inline void ata_dump_id(const u16 *id)
{
	/* Dump the IDENTIFY words most useful for transfer-mode debugging:
	 * capabilities (49), field validity (53), PIO/MWDMA (63/64),
	 * min PIO cycle (75), version/command sets (80-84), UDMA (88),
	 * hardware reset result (93).
	 */
	DPRINTK("49==0x%04x "
		"53==0x%04x "
		"63==0x%04x "
		"64==0x%04x "
		"75==0x%04x \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x "
		"81==0x%04x "
		"82==0x%04x "
		"83==0x%04x "
		"84==0x%04x \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1239
cb95d562
TH
1240/**
1241 * ata_id_xfermask - Compute xfermask from the given IDENTIFY data
1242 * @id: IDENTIFY data to compute xfer mask from
1243 *
1244 * Compute the xfermask for this device. This is not as trivial
1245 * as it seems if we must consider early devices correctly.
1246 *
1247 * FIXME: pre IDE drive timing (do we care ?).
1248 *
1249 * LOCKING:
1250 * None.
1251 *
1252 * RETURNS:
1253 * Computed xfermask
1254 */
1255static unsigned int ata_id_xfermask(const u16 *id)
1256{
1257 unsigned int pio_mask, mwdma_mask, udma_mask;
1258
1259 /* Usual case. Word 53 indicates word 64 is valid */
1260 if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
1261 pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
1262 pio_mask <<= 3;
1263 pio_mask |= 0x7;
1264 } else {
1265 /* If word 64 isn't valid then Word 51 high byte holds
1266 * the PIO timing number for the maximum. Turn it into
1267 * a mask.
1268 */
7a0f1c8a 1269 u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
46767aeb
AC
1270 if (mode < 5) /* Valid PIO range */
1271 pio_mask = (2 << mode) - 1;
1272 else
1273 pio_mask = 1;
cb95d562
TH
1274
1275 /* But wait.. there's more. Design your standards by
1276 * committee and you too can get a free iordy field to
1277 * process. However its the speeds not the modes that
1278 * are supported... Note drivers using the timing API
1279 * will get this right anyway
1280 */
1281 }
1282
1283 mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;
fb21f0d0 1284
b352e57d
AC
1285 if (ata_id_is_cfa(id)) {
1286 /*
1287 * Process compact flash extended modes
1288 */
1289 int pio = id[163] & 0x7;
1290 int dma = (id[163] >> 3) & 7;
1291
1292 if (pio)
1293 pio_mask |= (1 << 5);
1294 if (pio > 1)
1295 pio_mask |= (1 << 6);
1296 if (dma)
1297 mwdma_mask |= (1 << 3);
1298 if (dma > 1)
1299 mwdma_mask |= (1 << 4);
1300 }
1301
fb21f0d0
TH
1302 udma_mask = 0;
1303 if (id[ATA_ID_FIELD_VALID] & (1 << 2))
1304 udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;
cb95d562
TH
1305
1306 return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
1307}
1308
86e45b6b
TH
1309/**
1310 * ata_port_queue_task - Queue port_task
1311 * @ap: The ata_port to queue port_task for
e2a7f77a 1312 * @fn: workqueue function to be scheduled
65f27f38 1313 * @data: data for @fn to use
e2a7f77a 1314 * @delay: delay time for workqueue function
86e45b6b
TH
1315 *
1316 * Schedule @fn(@data) for execution after @delay jiffies using
1317 * port_task. There is one port_task per port and it's the
1318 * user(low level driver)'s responsibility to make sure that only
1319 * one task is active at any given time.
1320 *
1321 * libata core layer takes care of synchronization between
1322 * port_task and EH. ata_port_queue_task() may be ignored for EH
1323 * synchronization.
1324 *
1325 * LOCKING:
1326 * Inherited from caller.
1327 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	/* Re-point the port's single delayed work item at @fn and stash
	 * the caller's context where the work function can find it.
	 */
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1337
1338/**
1339 * ata_port_flush_task - Flush port_task
1340 * @ap: The ata_port to flush port_task for
1341 *
1342 * After this function completes, port_task is guranteed not to
1343 * be running or scheduled.
1344 *
1345 * LOCKING:
1346 * Kernel thread context (may sleep)
1347 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* Cancel a pending port_task and wait out a running one; on
	 * return port_task is neither running nor scheduled.
	 */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1357
7102d230 1358static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1359{
77853bf2 1360 struct completion *waiting = qc->private_data;
a2a7a662 1361
a2a7a662 1362 complete(waiting);
a2a7a662
TH
1363}
1364
1365/**
2432697b 1366 * ata_exec_internal_sg - execute libata internal command
a2a7a662
TH
1367 * @dev: Device to which the command is sent
1368 * @tf: Taskfile registers for the command and the result
d69cf37d 1369 * @cdb: CDB for packet command
a2a7a662 1370 * @dma_dir: Data tranfer direction of the command
2432697b
TH
1371 * @sg: sg list for the data buffer of the command
1372 * @n_elem: Number of sg entries
a2a7a662
TH
1373 *
1374 * Executes libata internal command with timeout. @tf contains
1375 * command on entry and result on return. Timeout and error
1376 * conditions are reported via return value. No recovery action
1377 * is taken after a command times out. It's caller's duty to
1378 * clean up after timeout.
1379 *
1380 * LOCKING:
1381 * None. Should be called with kernel context, might sleep.
551e8889
TH
1382 *
1383 * RETURNS:
1384 * Zero on success, AC_ERR_* mask on failure
a2a7a662 1385 */
2432697b
TH
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sg,
			      unsigned int n_elem)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	/* the internal tag must be free; BUG if bookkeeping says otherwise */
	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save and clear the active-command bookkeeping so the internal
	 * command runs alone; restored verbatim when we finish up below
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;

		/* total byte count of the data phase, from the sg list */
		for (i = 0; i < n_elem; i++)
			buflen += sg[i].length;

		ata_sg_init(qc, sg, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	/* drop the lock while we sleep; completion runs from irq context */
	spin_unlock_irqrestore(ap->lock, flags);

	rc = wait_for_completion_timeout(&wait, ata_probe_timeout);

	ata_port_flush_task(ap);

	if (!rc) {
		/* timed out waiting for completion */
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit when a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	/* hand the result taskfile back to the caller */
	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	/* restore the bookkeeping saved before issue */
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1541
2432697b 1542/**
33480a0e 1543 * ata_exec_internal - execute libata internal command
2432697b
TH
1544 * @dev: Device to which the command is sent
1545 * @tf: Taskfile registers for the command and the result
1546 * @cdb: CDB for packet command
1547 * @dma_dir: Data tranfer direction of the command
1548 * @buf: Data buffer of the command
1549 * @buflen: Length of data buffer
1550 *
1551 * Wrapper around ata_exec_internal_sg() which takes simple
1552 * buffer instead of sg list.
1553 *
1554 * LOCKING:
1555 * None. Should be called with kernel context, might sleep.
1556 *
1557 * RETURNS:
1558 * Zero on success, AC_ERR_* mask on failure
1559 */
1560unsigned ata_exec_internal(struct ata_device *dev,
1561 struct ata_taskfile *tf, const u8 *cdb,
1562 int dma_dir, void *buf, unsigned int buflen)
1563{
33480a0e
TH
1564 struct scatterlist *psg = NULL, sg;
1565 unsigned int n_elem = 0;
2432697b 1566
33480a0e
TH
1567 if (dma_dir != DMA_NONE) {
1568 WARN_ON(!buf);
1569 sg_init_one(&sg, buf, buflen);
1570 psg = &sg;
1571 n_elem++;
1572 }
2432697b 1573
33480a0e 1574 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem);
2432697b
TH
1575}
1576
977e6b9f
TH
1577/**
1578 * ata_do_simple_cmd - execute simple internal command
1579 * @dev: Device to which the command is sent
1580 * @cmd: Opcode to execute
1581 *
1582 * Execute a 'simple' command, that only consists of the opcode
1583 * 'cmd' itself, without filling any other registers
1584 *
1585 * LOCKING:
1586 * Kernel thread context (may sleep).
1587 *
1588 * RETURNS:
1589 * Zero on success, AC_ERR_* mask on failure
e58eb583 1590 */
77b08fb5 1591unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1592{
1593 struct ata_taskfile tf;
e58eb583
TH
1594
1595 ata_tf_init(dev, &tf);
1596
1597 tf.command = cmd;
1598 tf.flags |= ATA_TFLAG_DEVICE;
1599 tf.protocol = ATA_PROT_NODATA;
1600
977e6b9f 1601 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
e58eb583
TH
1602}
1603
1bc4ccff
AC
1604/**
1605 * ata_pio_need_iordy - check if iordy needed
1606 * @adev: ATA device
1607 *
1608 * Check if the current speed of the device requires IORDY. Used
1609 * by various controllers for chip configuration.
1610 */
a617c09f 1611
1bc4ccff
AC
1612unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1613{
432729f0
AC
1614 /* Controller doesn't support IORDY. Probably a pointless check
1615 as the caller should know this */
9af5c9c9 1616 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1617 return 0;
432729f0
AC
1618 /* PIO3 and higher it is mandatory */
1619 if (adev->pio_mode > XFER_PIO_2)
1620 return 1;
1621 /* We turn it on when possible */
1622 if (ata_id_has_iordy(adev->id))
1bc4ccff 1623 return 1;
432729f0
AC
1624 return 0;
1625}
2e9edbf8 1626
432729f0
AC
1627/**
1628 * ata_pio_mask_no_iordy - Return the non IORDY mask
1629 * @adev: ATA device
1630 *
1631 * Compute the highest mode possible if we are not using iordy. Return
1632 * -1 if no iordy mode is available.
1633 */
a617c09f 1634
432729f0
AC
1635static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1636{
1bc4ccff 1637 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1638 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1639 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1640 /* Is the speed faster than the drive allows non IORDY ? */
1641 if (pio) {
1642 /* This is cycle times not frequency - watch the logic! */
1643 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1644 return 3 << ATA_SHIFT_PIO;
1645 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1646 }
1647 }
432729f0 1648 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1649}
1650
1da177e4 1651/**
49016aca 1652 * ata_dev_read_id - Read ID data from the specified device
49016aca
TH
1653 * @dev: target device
1654 * @p_class: pointer to class of the target device (may be changed)
bff04647 1655 * @flags: ATA_READID_* flags
fe635c7e 1656 * @id: buffer to read IDENTIFY data into
1da177e4 1657 *
49016aca
TH
1658 * Read ID data from the specified device. ATA_CMD_ID_ATA is
1659 * performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
aec5c3c1
TH
1660 * devices. This function also issues ATA_CMD_INIT_DEV_PARAMS
1661 * for pre-ATA4 drives.
1da177e4 1662 *
50a99018
AC
1663 * FIXME: ATA_CMD_ID_ATA is optional for early drives and right
1664 * now we abort if we hit that case.
1665 *
1da177e4 1666 * LOCKING:
49016aca
TH
1667 * Kernel thread context (may sleep)
1668 *
1669 * RETURNS:
1670 * 0 on success, -errno otherwise.
1da177e4 1671 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	/* may_fallback: one shot at the other IDENTIFY opcode if the
	 * device aborts the first; tried_spinup: one shot at the
	 * SET_FEATURES spin-up sequence for standby power-up drives.
	 */
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY opcode matching the assumed class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	/* IDENTIFY data is little-endian on the wire */
	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the data must agree with the class we asked for */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	/* word 2 values 0x37c8/0x738c signal standby power-up
	 * (incomplete/complete IDENTIFY data respectively)
	 */
	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		ata_tf_init(dev, &tf);
		tf.command = ATA_CMD_SET_FEATURES;
		tf.feature = SETFEATURES_SPINUP;
		tf.protocol = ATA_PROT_NODATA;
		tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
		err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * shoud never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed. reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
1827
3373efd8 1828static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 1829{
9af5c9c9
TH
1830 struct ata_port *ap = dev->link->ap;
1831 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
1832}
1833
a6e6ce8e
TH
1834static void ata_dev_config_ncq(struct ata_device *dev,
1835 char *desc, size_t desc_sz)
1836{
9af5c9c9 1837 struct ata_port *ap = dev->link->ap;
a6e6ce8e
TH
1838 int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);
1839
1840 if (!ata_id_has_ncq(dev->id)) {
1841 desc[0] = '\0';
1842 return;
1843 }
75683fe7 1844 if (dev->horkage & ATA_HORKAGE_NONCQ) {
6919a0a6
AC
1845 snprintf(desc, desc_sz, "NCQ (not used)");
1846 return;
1847 }
a6e6ce8e 1848 if (ap->flags & ATA_FLAG_NCQ) {
cca3974e 1849 hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
a6e6ce8e
TH
1850 dev->flags |= ATA_DFLAG_NCQ;
1851 }
1852
1853 if (hdepth >= ddepth)
1854 snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
1855 else
1856 snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
1857}
1858
49016aca 1859/**
ffeae418 1860 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
1861 * @dev: Target device to configure
1862 *
1863 * Configure @dev according to @dev->id. Generic and low-level
1864 * driver specific fixups are also applied.
49016aca
TH
1865 *
1866 * LOCKING:
ffeae418
TH
1867 * Kernel thread context (may sleep)
1868 *
1869 * RETURNS:
1870 * 0 on success, -errno otherwise
49016aca 1871 */
efdaedc4 1872int ata_dev_configure(struct ata_device *dev)
49016aca 1873{
9af5c9c9
TH
1874 struct ata_port *ap = dev->link->ap;
1875 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 1876 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 1877 const u16 *id = dev->id;
ff8854b2 1878 unsigned int xfer_mask;
b352e57d 1879 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
1880 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
1881 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 1882 int rc;
49016aca 1883
0dd4b21f 1884 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
1885 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
1886 __FUNCTION__);
ffeae418 1887 return 0;
49016aca
TH
1888 }
1889
0dd4b21f 1890 if (ata_msg_probe(ap))
44877b4e 1891 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 1892
75683fe7
TH
1893 /* set horkage */
1894 dev->horkage |= ata_dev_blacklisted(dev);
1895
6746544c
TH
1896 /* let ACPI work its magic */
1897 rc = ata_acpi_on_devcfg(dev);
1898 if (rc)
1899 return rc;
08573a86 1900
05027adc
TH
1901 /* massage HPA, do it early as it might change IDENTIFY data */
1902 rc = ata_hpa_resize(dev);
1903 if (rc)
1904 return rc;
1905
c39f5ebe 1906 /* print device capabilities */
0dd4b21f 1907 if (ata_msg_probe(ap))
88574551
TH
1908 ata_dev_printk(dev, KERN_DEBUG,
1909 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
1910 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 1911 __FUNCTION__,
f15a1daf
TH
1912 id[49], id[82], id[83], id[84],
1913 id[85], id[86], id[87], id[88]);
c39f5ebe 1914
208a9933 1915 /* initialize to-be-configured parameters */
ea1dd4e1 1916 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
1917 dev->max_sectors = 0;
1918 dev->cdb_len = 0;
1919 dev->n_sectors = 0;
1920 dev->cylinders = 0;
1921 dev->heads = 0;
1922 dev->sectors = 0;
1923
1da177e4
LT
1924 /*
1925 * common ATA, ATAPI feature tests
1926 */
1927
ff8854b2 1928 /* find max transfer mode; for printk only */
1148c3a7 1929 xfer_mask = ata_id_xfermask(id);
1da177e4 1930
0dd4b21f
BP
1931 if (ata_msg_probe(ap))
1932 ata_dump_id(id);
1da177e4 1933
ef143d57
AL
1934 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
1935 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
1936 sizeof(fwrevbuf));
1937
1938 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
1939 sizeof(modelbuf));
1940
1da177e4
LT
1941 /* ATA-specific feature tests */
1942 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
1943 if (ata_id_is_cfa(id)) {
1944 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
1945 ata_dev_printk(dev, KERN_WARNING,
1946 "supports DRM functions and may "
1947 "not be fully accessable.\n");
b352e57d
AC
1948 snprintf(revbuf, 7, "CFA");
1949 }
1950 else
1951 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
1952
1148c3a7 1953 dev->n_sectors = ata_id_n_sectors(id);
2940740b 1954
3f64f565
EM
1955 if (dev->id[59] & 0x100)
1956 dev->multi_count = dev->id[59] & 0xff;
1957
1148c3a7 1958 if (ata_id_has_lba(id)) {
4c2d721a 1959 const char *lba_desc;
a6e6ce8e 1960 char ncq_desc[20];
8bf62ece 1961
4c2d721a
TH
1962 lba_desc = "LBA";
1963 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 1964 if (ata_id_has_lba48(id)) {
8bf62ece 1965 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 1966 lba_desc = "LBA48";
6fc49adb
TH
1967
1968 if (dev->n_sectors >= (1UL << 28) &&
1969 ata_id_has_flush_ext(id))
1970 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 1971 }
8bf62ece 1972
a6e6ce8e
TH
1973 /* config NCQ */
1974 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
1975
8bf62ece 1976 /* print device info to dmesg */
3f64f565
EM
1977 if (ata_msg_drv(ap) && print_info) {
1978 ata_dev_printk(dev, KERN_INFO,
1979 "%s: %s, %s, max %s\n",
1980 revbuf, modelbuf, fwrevbuf,
1981 ata_mode_string(xfer_mask));
1982 ata_dev_printk(dev, KERN_INFO,
1983 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 1984 (unsigned long long)dev->n_sectors,
3f64f565
EM
1985 dev->multi_count, lba_desc, ncq_desc);
1986 }
ffeae418 1987 } else {
8bf62ece
AL
1988 /* CHS */
1989
1990 /* Default translation */
1148c3a7
TH
1991 dev->cylinders = id[1];
1992 dev->heads = id[3];
1993 dev->sectors = id[6];
8bf62ece 1994
1148c3a7 1995 if (ata_id_current_chs_valid(id)) {
8bf62ece 1996 /* Current CHS translation is valid. */
1148c3a7
TH
1997 dev->cylinders = id[54];
1998 dev->heads = id[55];
1999 dev->sectors = id[56];
8bf62ece
AL
2000 }
2001
2002 /* print device info to dmesg */
3f64f565 2003 if (ata_msg_drv(ap) && print_info) {
88574551 2004 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2005 "%s: %s, %s, max %s\n",
2006 revbuf, modelbuf, fwrevbuf,
2007 ata_mode_string(xfer_mask));
a84471fe 2008 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2009 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2010 (unsigned long long)dev->n_sectors,
2011 dev->multi_count, dev->cylinders,
2012 dev->heads, dev->sectors);
2013 }
07f6f7d0
AL
2014 }
2015
6e7846e9 2016 dev->cdb_len = 16;
1da177e4
LT
2017 }
2018
2019 /* ATAPI-specific feature tests */
2c13b7ce 2020 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2021 const char *cdb_intr_string = "";
2022 const char *atapi_an_string = "";
7d77b247 2023 u32 sntf;
08a556db 2024
1148c3a7 2025 rc = atapi_cdb_len(id);
1da177e4 2026 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2027 if (ata_msg_warn(ap))
88574551
TH
2028 ata_dev_printk(dev, KERN_WARNING,
2029 "unsupported CDB len\n");
ffeae418 2030 rc = -EINVAL;
1da177e4
LT
2031 goto err_out_nosup;
2032 }
6e7846e9 2033 dev->cdb_len = (unsigned int) rc;
1da177e4 2034
7d77b247
TH
2035 /* Enable ATAPI AN if both the host and device have
2036 * the support. If PMP is attached, SNTF is required
2037 * to enable ATAPI AN to discern between PHY status
2038 * changed notifications and ATAPI ANs.
9f45cbd3 2039 */
7d77b247
TH
2040 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2041 (!ap->nr_pmp_links ||
2042 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2043 unsigned int err_mask;
2044
9f45cbd3 2045 /* issue SET feature command to turn this on */
854c73a2
TH
2046 err_mask = ata_dev_set_AN(dev, SETFEATURES_SATA_ENABLE);
2047 if (err_mask)
9f45cbd3 2048 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2049 "failed to enable ATAPI AN "
2050 "(err_mask=0x%x)\n", err_mask);
2051 else {
9f45cbd3 2052 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2053 atapi_an_string = ", ATAPI AN";
2054 }
9f45cbd3
KCA
2055 }
2056
08a556db 2057 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2058 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2059 cdb_intr_string = ", CDB intr";
2060 }
312f7da2 2061
1da177e4 2062 /* print device info to dmesg */
5afc8142 2063 if (ata_msg_drv(ap) && print_info)
ef143d57 2064 ata_dev_printk(dev, KERN_INFO,
854c73a2 2065 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2066 modelbuf, fwrevbuf,
12436c30 2067 ata_mode_string(xfer_mask),
854c73a2 2068 cdb_intr_string, atapi_an_string);
1da177e4
LT
2069 }
2070
914ed354
TH
2071 /* determine max_sectors */
2072 dev->max_sectors = ATA_MAX_SECTORS;
2073 if (dev->flags & ATA_DFLAG_LBA48)
2074 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2075
93590859
AC
2076 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2077 /* Let the user know. We don't want to disallow opens for
2078 rescue purposes, or in case the vendor is just a blithering
2079 idiot */
2080 if (print_info) {
2081 ata_dev_printk(dev, KERN_WARNING,
2082"Drive reports diagnostics failure. This may indicate a drive\n");
2083 ata_dev_printk(dev, KERN_WARNING,
2084"fault or invalid emulation. Contact drive vendor for information.\n");
2085 }
2086 }
2087
4b2f3ede 2088 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2089 if (ata_dev_knobble(dev)) {
5afc8142 2090 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2091 ata_dev_printk(dev, KERN_INFO,
2092 "applying bridge limits\n");
5a529139 2093 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2094 dev->max_sectors = ATA_MAX_SECTORS;
2095 }
2096
75683fe7 2097 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2098 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2099 dev->max_sectors);
18d6e9d5 2100
4b2f3ede 2101 if (ap->ops->dev_config)
cd0d3bbc 2102 ap->ops->dev_config(dev);
4b2f3ede 2103
0dd4b21f
BP
2104 if (ata_msg_probe(ap))
2105 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2106 __FUNCTION__, ata_chk_status(ap));
ffeae418 2107 return 0;
1da177e4
LT
2108
2109err_out_nosup:
0dd4b21f 2110 if (ata_msg_probe(ap))
88574551
TH
2111 ata_dev_printk(dev, KERN_DEBUG,
2112 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2113 return rc;
1da177e4
LT
2114}
2115
be0d18df 2116/**
2e41e8e6 2117 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2118 * @ap: port
2119 *
2e41e8e6 2120 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2121 * detection.
2122 */
2123
2124int ata_cable_40wire(struct ata_port *ap)
2125{
2126 return ATA_CBL_PATA40;
2127}
2128
2129/**
2e41e8e6 2130 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2131 * @ap: port
2132 *
2e41e8e6 2133 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2134 * detection.
2135 */
2136
2137int ata_cable_80wire(struct ata_port *ap)
2138{
2139 return ATA_CBL_PATA80;
2140}
2141
2142/**
2143 * ata_cable_unknown - return unknown PATA cable.
2144 * @ap: port
2145 *
2146 * Helper method for drivers which have no PATA cable detection.
2147 */
2148
2149int ata_cable_unknown(struct ata_port *ap)
2150{
2151 return ATA_CBL_PATA_UNK;
2152}
2153
2154/**
2155 * ata_cable_sata - return SATA cable type
2156 * @ap: port
2157 *
2158 * Helper method for drivers which have SATA cables
2159 */
2160
2161int ata_cable_sata(struct ata_port *ap)
2162{
2163 return ATA_CBL_SATA;
2164}
2165
1da177e4
LT
2166/**
2167 * ata_bus_probe - Reset and probe ATA bus
2168 * @ap: Bus to probe
2169 *
0cba632b
JG
2170 * Master ATA bus probing function. Initiates a hardware-dependent
2171 * bus reset, then attempts to identify any devices found on
2172 * the bus.
2173 *
1da177e4 2174 * LOCKING:
0cba632b 2175 * PCI/etc. bus probe sem.
1da177e4
LT
2176 *
2177 * RETURNS:
96072e69 2178 * Zero on success, negative errno otherwise.
1da177e4
LT
2179 */
2180
80289167 2181int ata_bus_probe(struct ata_port *ap)
1da177e4 2182{
28ca5c57 2183 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2184 int tries[ATA_MAX_DEVICES];
f58229f8 2185 int rc;
e82cbdb9 2186 struct ata_device *dev;
1da177e4 2187
28ca5c57 2188 ata_port_probe(ap);
c19ba8af 2189
f58229f8
TH
2190 ata_link_for_each_dev(dev, &ap->link)
2191 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2192
2193 retry:
2044470c 2194 /* reset and determine device classes */
52783c5d 2195 ap->ops->phy_reset(ap);
2061a47a 2196
f58229f8 2197 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2198 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2199 dev->class != ATA_DEV_UNKNOWN)
2200 classes[dev->devno] = dev->class;
2201 else
2202 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2203
52783c5d 2204 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2205 }
1da177e4 2206
52783c5d 2207 ata_port_probe(ap);
2044470c 2208
b6079ca4
AC
2209 /* after the reset the device state is PIO 0 and the controller
2210 state is undefined. Record the mode */
2211
f58229f8
TH
2212 ata_link_for_each_dev(dev, &ap->link)
2213 dev->pio_mode = XFER_PIO_0;
b6079ca4 2214
f31f0cc2
JG
2215 /* read IDENTIFY page and configure devices. We have to do the identify
2216 specific sequence bass-ackwards so that PDIAG- is released by
2217 the slave device */
2218
f58229f8
TH
2219 ata_link_for_each_dev(dev, &ap->link) {
2220 if (tries[dev->devno])
2221 dev->class = classes[dev->devno];
ffeae418 2222
14d2bac1 2223 if (!ata_dev_enabled(dev))
ffeae418 2224 continue;
ffeae418 2225
bff04647
TH
2226 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2227 dev->id);
14d2bac1
TH
2228 if (rc)
2229 goto fail;
f31f0cc2
JG
2230 }
2231
be0d18df
AC
2232 /* Now ask for the cable type as PDIAG- should have been released */
2233 if (ap->ops->cable_detect)
2234 ap->cbl = ap->ops->cable_detect(ap);
2235
614fe29b
AC
2236 /* We may have SATA bridge glue hiding here irrespective of the
2237 reported cable types and sensed types */
2238 ata_link_for_each_dev(dev, &ap->link) {
2239 if (!ata_dev_enabled(dev))
2240 continue;
2241 /* SATA drives indicate we have a bridge. We don't know which
2242 end of the link the bridge is which is a problem */
2243 if (ata_id_is_sata(dev->id))
2244 ap->cbl = ATA_CBL_SATA;
2245 }
2246
f31f0cc2
JG
2247 /* After the identify sequence we can now set up the devices. We do
2248 this in the normal order so that the user doesn't get confused */
2249
f58229f8 2250 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2251 if (!ata_dev_enabled(dev))
2252 continue;
14d2bac1 2253
9af5c9c9 2254 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2255 rc = ata_dev_configure(dev);
9af5c9c9 2256 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2257 if (rc)
2258 goto fail;
1da177e4
LT
2259 }
2260
e82cbdb9 2261 /* configure transfer mode */
0260731f 2262 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2263 if (rc)
51713d35 2264 goto fail;
1da177e4 2265
f58229f8
TH
2266 ata_link_for_each_dev(dev, &ap->link)
2267 if (ata_dev_enabled(dev))
e82cbdb9 2268 return 0;
1da177e4 2269
e82cbdb9
TH
2270 /* no device present, disable port */
2271 ata_port_disable(ap);
96072e69 2272 return -ENODEV;
14d2bac1
TH
2273
2274 fail:
4ae72a1e
TH
2275 tries[dev->devno]--;
2276
14d2bac1
TH
2277 switch (rc) {
2278 case -EINVAL:
4ae72a1e 2279 /* eeek, something went very wrong, give up */
14d2bac1
TH
2280 tries[dev->devno] = 0;
2281 break;
4ae72a1e
TH
2282
2283 case -ENODEV:
2284 /* give it just one more chance */
2285 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2286 case -EIO:
4ae72a1e
TH
2287 if (tries[dev->devno] == 1) {
2288 /* This is the last chance, better to slow
2289 * down than lose it.
2290 */
936fd732 2291 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2292 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2293 }
14d2bac1
TH
2294 }
2295
4ae72a1e 2296 if (!tries[dev->devno])
3373efd8 2297 ata_dev_disable(dev);
ec573755 2298
14d2bac1 2299 goto retry;
1da177e4
LT
2300}
2301
2302/**
0cba632b
JG
2303 * ata_port_probe - Mark port as enabled
2304 * @ap: Port for which we indicate enablement
1da177e4 2305 *
0cba632b
JG
2306 * Modify @ap data structure such that the system
2307 * thinks that the entire port is enabled.
2308 *
cca3974e 2309 * LOCKING: host lock, or some other form of
0cba632b 2310 * serialization.
1da177e4
LT
2311 */
2312
2313void ata_port_probe(struct ata_port *ap)
2314{
198e0fed 2315 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2316}
2317
3be680b7
TH
2318/**
2319 * sata_print_link_status - Print SATA link status
936fd732 2320 * @link: SATA link to printk link status about
3be680b7
TH
2321 *
2322 * This function prints link speed and status of a SATA link.
2323 *
2324 * LOCKING:
2325 * None.
2326 */
936fd732 2327void sata_print_link_status(struct ata_link *link)
3be680b7 2328{
6d5f9732 2329 u32 sstatus, scontrol, tmp;
3be680b7 2330
936fd732 2331 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2332 return;
936fd732 2333 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2334
936fd732 2335 if (ata_link_online(link)) {
3be680b7 2336 tmp = (sstatus >> 4) & 0xf;
936fd732 2337 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2338 "SATA link up %s (SStatus %X SControl %X)\n",
2339 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2340 } else {
936fd732 2341 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2342 "SATA link down (SStatus %X SControl %X)\n",
2343 sstatus, scontrol);
3be680b7
TH
2344 }
2345}
2346
1da177e4 2347/**
780a87f7
JG
2348 * __sata_phy_reset - Wake/reset a low-level SATA PHY
2349 * @ap: SATA port associated with target SATA PHY.
1da177e4 2350 *
780a87f7
JG
2351 * This function issues commands to standard SATA Sxxx
2352 * PHY registers, to wake up the phy (and device), and
2353 * clear any reset condition.
1da177e4
LT
2354 *
2355 * LOCKING:
0cba632b 2356 * PCI/etc. bus probe sem.
1da177e4
LT
2357 *
2358 */
2359void __sata_phy_reset(struct ata_port *ap)
2360{
936fd732 2361 struct ata_link *link = &ap->link;
1da177e4 2362 unsigned long timeout = jiffies + (HZ * 5);
936fd732 2363 u32 sstatus;
1da177e4
LT
2364
2365 if (ap->flags & ATA_FLAG_SATA_RESET) {
cdcca89e 2366 /* issue phy wake/reset */
936fd732 2367 sata_scr_write_flush(link, SCR_CONTROL, 0x301);
62ba2841
TH
2368 /* Couldn't find anything in SATA I/II specs, but
2369 * AHCI-1.1 10.4.2 says at least 1 ms. */
2370 mdelay(1);
1da177e4 2371 }
81952c54 2372 /* phy wake/clear reset */
936fd732 2373 sata_scr_write_flush(link, SCR_CONTROL, 0x300);
1da177e4
LT
2374
2375 /* wait for phy to become ready, if necessary */
2376 do {
2377 msleep(200);
936fd732 2378 sata_scr_read(link, SCR_STATUS, &sstatus);
1da177e4
LT
2379 if ((sstatus & 0xf) != 1)
2380 break;
2381 } while (time_before(jiffies, timeout));
2382
3be680b7 2383 /* print link status */
936fd732 2384 sata_print_link_status(link);
656563e3 2385
3be680b7 2386 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 2387 if (!ata_link_offline(link))
1da177e4 2388 ata_port_probe(ap);
3be680b7 2389 else
1da177e4 2390 ata_port_disable(ap);
1da177e4 2391
198e0fed 2392 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2393 return;
2394
2395 if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
2396 ata_port_disable(ap);
2397 return;
2398 }
2399
2400 ap->cbl = ATA_CBL_SATA;
2401}
2402
2403/**
780a87f7
JG
2404 * sata_phy_reset - Reset SATA bus.
2405 * @ap: SATA port associated with target SATA PHY.
1da177e4 2406 *
780a87f7
JG
2407 * This function resets the SATA bus, and then probes
2408 * the bus for devices.
1da177e4
LT
2409 *
2410 * LOCKING:
0cba632b 2411 * PCI/etc. bus probe sem.
1da177e4
LT
2412 *
2413 */
2414void sata_phy_reset(struct ata_port *ap)
2415{
2416 __sata_phy_reset(ap);
198e0fed 2417 if (ap->flags & ATA_FLAG_DISABLED)
1da177e4
LT
2418 return;
2419 ata_bus_reset(ap);
2420}
2421
ebdfca6e
AC
2422/**
2423 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2424 * @adev: device
2425 *
2426 * Obtain the other device on the same cable, or if none is
2427 * present NULL is returned
2428 */
2e9edbf8 2429
3373efd8 2430struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2431{
9af5c9c9
TH
2432 struct ata_link *link = adev->link;
2433 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2434 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2435 return NULL;
2436 return pair;
2437}
2438
1da177e4 2439/**
780a87f7
JG
2440 * ata_port_disable - Disable port.
2441 * @ap: Port to be disabled.
1da177e4 2442 *
780a87f7
JG
2443 * Modify @ap data structure such that the system
2444 * thinks that the entire port is disabled, and should
2445 * never attempt to probe or communicate with devices
2446 * on this port.
2447 *
cca3974e 2448 * LOCKING: host lock, or some other form of
780a87f7 2449 * serialization.
1da177e4
LT
2450 */
2451
2452void ata_port_disable(struct ata_port *ap)
2453{
9af5c9c9
TH
2454 ap->link.device[0].class = ATA_DEV_NONE;
2455 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2456 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2457}
2458
1c3fae4d 2459/**
3c567b7d 2460 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2461 * @link: Link to adjust SATA spd limit for
1c3fae4d 2462 *
936fd732 2463 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2464 * function only adjusts the limit. The change must be applied
3c567b7d 2465 * using sata_set_spd().
1c3fae4d
TH
2466 *
2467 * LOCKING:
2468 * Inherited from caller.
2469 *
2470 * RETURNS:
2471 * 0 on success, negative errno on failure
2472 */
936fd732 2473int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2474{
81952c54
TH
2475 u32 sstatus, spd, mask;
2476 int rc, highbit;
1c3fae4d 2477
936fd732 2478 if (!sata_scr_valid(link))
008a7896
TH
2479 return -EOPNOTSUPP;
2480
2481 /* If SCR can be read, use it to determine the current SPD.
936fd732 2482 * If not, use cached value in link->sata_spd.
008a7896 2483 */
936fd732 2484 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2485 if (rc == 0)
2486 spd = (sstatus >> 4) & 0xf;
2487 else
936fd732 2488 spd = link->sata_spd;
1c3fae4d 2489
936fd732 2490 mask = link->sata_spd_limit;
1c3fae4d
TH
2491 if (mask <= 1)
2492 return -EINVAL;
008a7896
TH
2493
2494 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2495 highbit = fls(mask) - 1;
2496 mask &= ~(1 << highbit);
2497
008a7896
TH
2498 /* Mask off all speeds higher than or equal to the current
2499 * one. Force 1.5Gbps if current SPD is not available.
2500 */
2501 if (spd > 1)
2502 mask &= (1 << (spd - 1)) - 1;
2503 else
2504 mask &= 1;
2505
2506 /* were we already at the bottom? */
1c3fae4d
TH
2507 if (!mask)
2508 return -EINVAL;
2509
936fd732 2510 link->sata_spd_limit = mask;
1c3fae4d 2511
936fd732 2512 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2513 sata_spd_string(fls(mask)));
1c3fae4d
TH
2514
2515 return 0;
2516}
2517
936fd732 2518static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d
TH
2519{
2520 u32 spd, limit;
2521
936fd732 2522 if (link->sata_spd_limit == UINT_MAX)
1c3fae4d
TH
2523 limit = 0;
2524 else
936fd732 2525 limit = fls(link->sata_spd_limit);
1c3fae4d
TH
2526
2527 spd = (*scontrol >> 4) & 0xf;
2528 *scontrol = (*scontrol & ~0xf0) | ((limit & 0xf) << 4);
2529
2530 return spd != limit;
2531}
2532
2533/**
3c567b7d 2534 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2535 * @link: Link in question
1c3fae4d
TH
2536 *
2537 * Test whether the spd limit in SControl matches
936fd732 2538 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2539 * whether hardreset is necessary to apply SATA spd
2540 * configuration.
2541 *
2542 * LOCKING:
2543 * Inherited from caller.
2544 *
2545 * RETURNS:
2546 * 1 if SATA spd configuration is needed, 0 otherwise.
2547 */
936fd732 2548int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2549{
2550 u32 scontrol;
2551
936fd732 2552 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
1c3fae4d
TH
2553 return 0;
2554
936fd732 2555 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2556}
2557
2558/**
3c567b7d 2559 * sata_set_spd - set SATA spd according to spd limit
936fd732 2560 * @link: Link to set SATA spd for
1c3fae4d 2561 *
936fd732 2562 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2563 *
2564 * LOCKING:
2565 * Inherited from caller.
2566 *
2567 * RETURNS:
2568 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2569 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2570 */
936fd732 2571int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2572{
2573 u32 scontrol;
81952c54 2574 int rc;
1c3fae4d 2575
936fd732 2576 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2577 return rc;
1c3fae4d 2578
936fd732 2579 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2580 return 0;
2581
936fd732 2582 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2583 return rc;
2584
1c3fae4d
TH
2585 return 1;
2586}
2587
452503f9
AC
2588/*
2589 * This mode timing computation functionality is ported over from
2590 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2591 */
2592/*
b352e57d 2593 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2594 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2595 * for UDMA6, which is currently supported only by Maxtor drives.
2596 *
2597 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2598 */
2599
2600static const struct ata_timing ata_timing[] = {
2601
2602 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
2603 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2604 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2605 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2606
b352e57d
AC
2607 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
2608 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
452503f9
AC
2609 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2610 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2611 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2612
2613/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
2e9edbf8 2614
452503f9
AC
2615 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
2616 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2617 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2e9edbf8 2618
452503f9
AC
2619 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
2620 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2621 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2622
b352e57d
AC
2623 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
2624 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
452503f9
AC
2625 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2626 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2627
2628 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2629 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2630 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2631
2632/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2633
2634 { 0xFF }
2635};
2636
2637#define ENOUGH(v,unit) (((v)-1)/(unit)+1)
2638#define EZ(v,unit) ((v)?ENOUGH(v,unit):0)
2639
2640static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2641{
2642 q->setup = EZ(t->setup * 1000, T);
2643 q->act8b = EZ(t->act8b * 1000, T);
2644 q->rec8b = EZ(t->rec8b * 1000, T);
2645 q->cyc8b = EZ(t->cyc8b * 1000, T);
2646 q->active = EZ(t->active * 1000, T);
2647 q->recover = EZ(t->recover * 1000, T);
2648 q->cycle = EZ(t->cycle * 1000, T);
2649 q->udma = EZ(t->udma * 1000, UT);
2650}
2651
2652void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2653 struct ata_timing *m, unsigned int what)
2654{
2655 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2656 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2657 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2658 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2659 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2660 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2661 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2662 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2663}
2664
2665static const struct ata_timing* ata_timing_find_mode(unsigned short speed)
2666{
2667 const struct ata_timing *t;
2668
2669 for (t = ata_timing; t->mode != speed; t++)
91190758 2670 if (t->mode == 0xFF)
452503f9 2671 return NULL;
2e9edbf8 2672 return t;
452503f9
AC
2673}
2674
2675int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2676 struct ata_timing *t, int T, int UT)
2677{
2678 const struct ata_timing *s;
2679 struct ata_timing p;
2680
2681 /*
2e9edbf8 2682 * Find the mode.
75b1f2f8 2683 */
452503f9
AC
2684
2685 if (!(s = ata_timing_find_mode(speed)))
2686 return -EINVAL;
2687
75b1f2f8
AL
2688 memcpy(t, s, sizeof(*s));
2689
452503f9
AC
2690 /*
2691 * If the drive is an EIDE drive, it can tell us it needs extended
2692 * PIO/MW_DMA cycle timing.
2693 */
2694
2695 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2696 memset(&p, 0, sizeof(p));
2697 if(speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
2698 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2699 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2700 } else if(speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
2701 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2702 }
2703 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2704 }
2705
2706 /*
2707 * Convert the timing to bus clock counts.
2708 */
2709
75b1f2f8 2710 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2711
2712 /*
c893a3ae
RD
2713 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2714 * S.M.A.R.T * and some other commands. We have to ensure that the
2715 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2716 */
2717
fd3367af 2718 if (speed > XFER_PIO_6) {
452503f9
AC
2719 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2720 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2721 }
2722
2723 /*
c893a3ae 2724 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2725 */
2726
2727 if (t->act8b + t->rec8b < t->cyc8b) {
2728 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2729 t->rec8b = t->cyc8b - t->act8b;
2730 }
2731
2732 if (t->active + t->recover < t->cycle) {
2733 t->active += (t->cycle - (t->active + t->recover)) / 2;
2734 t->recover = t->cycle - t->active;
2735 }
a617c09f 2736
4f701d1e
AC
2737 /* In a few cases quantisation may produce enough errors to
2738 leave t->cycle too low for the sum of active and recovery
2739 if so we must correct this */
2740 if (t->active + t->recover > t->cycle)
2741 t->cycle = t->active + t->recover;
452503f9
AC
2742
2743 return 0;
2744}
2745
cf176e1a
TH
2746/**
2747 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2748 * @dev: Device to adjust xfer masks
458337db 2749 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2750 *
2751 * Adjust xfer masks of @dev downward. Note that this function
2752 * does not apply the change. Invoking ata_set_mode() afterwards
2753 * will apply the limit.
2754 *
2755 * LOCKING:
2756 * Inherited from caller.
2757 *
2758 * RETURNS:
2759 * 0 on success, negative errno on failure
2760 */
458337db 2761int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2762{
458337db
TH
2763 char buf[32];
2764 unsigned int orig_mask, xfer_mask;
2765 unsigned int pio_mask, mwdma_mask, udma_mask;
2766 int quiet, highbit;
cf176e1a 2767
458337db
TH
2768 quiet = !!(sel & ATA_DNXFER_QUIET);
2769 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2770
458337db
TH
2771 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2772 dev->mwdma_mask,
2773 dev->udma_mask);
2774 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2775
458337db
TH
2776 switch (sel) {
2777 case ATA_DNXFER_PIO:
2778 highbit = fls(pio_mask) - 1;
2779 pio_mask &= ~(1 << highbit);
2780 break;
2781
2782 case ATA_DNXFER_DMA:
2783 if (udma_mask) {
2784 highbit = fls(udma_mask) - 1;
2785 udma_mask &= ~(1 << highbit);
2786 if (!udma_mask)
2787 return -ENOENT;
2788 } else if (mwdma_mask) {
2789 highbit = fls(mwdma_mask) - 1;
2790 mwdma_mask &= ~(1 << highbit);
2791 if (!mwdma_mask)
2792 return -ENOENT;
2793 }
2794 break;
2795
2796 case ATA_DNXFER_40C:
2797 udma_mask &= ATA_UDMA_MASK_40C;
2798 break;
2799
2800 case ATA_DNXFER_FORCE_PIO0:
2801 pio_mask &= 1;
2802 case ATA_DNXFER_FORCE_PIO:
2803 mwdma_mask = 0;
2804 udma_mask = 0;
2805 break;
2806
458337db
TH
2807 default:
2808 BUG();
2809 }
2810
2811 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2812
2813 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2814 return -ENOENT;
2815
2816 if (!quiet) {
2817 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2818 snprintf(buf, sizeof(buf), "%s:%s",
2819 ata_mode_string(xfer_mask),
2820 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2821 else
2822 snprintf(buf, sizeof(buf), "%s",
2823 ata_mode_string(xfer_mask));
2824
2825 ata_dev_printk(dev, KERN_WARNING,
2826 "limiting speed to %s\n", buf);
2827 }
cf176e1a
TH
2828
2829 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2830 &dev->udma_mask);
2831
cf176e1a 2832 return 0;
cf176e1a
TH
2833}
2834
3373efd8 2835static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2836{
9af5c9c9 2837 struct ata_eh_context *ehc = &dev->link->eh_context;
83206a29
TH
2838 unsigned int err_mask;
2839 int rc;
1da177e4 2840
e8384607 2841 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2842 if (dev->xfer_shift == ATA_SHIFT_PIO)
2843 dev->flags |= ATA_DFLAG_PIO;
2844
3373efd8 2845 err_mask = ata_dev_set_xfermode(dev);
11750a40
AC
2846 /* Old CFA may refuse this command, which is just fine */
2847 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2848 err_mask &= ~AC_ERR_DEV;
0bc2a79a
AC
2849 /* Some very old devices and some bad newer ones fail any kind of
2850 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
2851 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
2852 dev->pio_mode <= XFER_PIO_2)
2853 err_mask &= ~AC_ERR_DEV;
83206a29 2854 if (err_mask) {
f15a1daf
TH
2855 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
2856 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
2857 return -EIO;
2858 }
1da177e4 2859
baa1e78a 2860 ehc->i.flags |= ATA_EHI_POST_SETMODE;
422c9daa 2861 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
baa1e78a 2862 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 2863 if (rc)
83206a29 2864 return rc;
48a8a14f 2865
23e71c3d
TH
2866 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
2867 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 2868
f15a1daf
TH
2869 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
2870 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 2871 return 0;
1da177e4
LT
2872}
2873
1da177e4 2874/**
04351821 2875 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2876 * @link: link on which timings will be programmed
e82cbdb9 2877 * @r_failed_dev: out paramter for failed device
1da177e4 2878 *
04351821
AC
2879 * Standard implementation of the function used to tune and set
2880 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2881 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 2882 * returned in @r_failed_dev.
780a87f7 2883 *
1da177e4 2884 * LOCKING:
0cba632b 2885 * PCI/etc. bus probe sem.
e82cbdb9
TH
2886 *
2887 * RETURNS:
2888 * 0 on success, negative errno otherwise
1da177e4 2889 */
04351821 2890
0260731f 2891int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 2892{
0260731f 2893 struct ata_port *ap = link->ap;
e8e0619f 2894 struct ata_device *dev;
f58229f8 2895 int rc = 0, used_dma = 0, found = 0;
3adcebb2 2896
a6d5a51c 2897 /* step 1: calculate xfer_mask */
f58229f8 2898 ata_link_for_each_dev(dev, link) {
acf356b1 2899 unsigned int pio_mask, dma_mask;
a6d5a51c 2900
e1211e3f 2901 if (!ata_dev_enabled(dev))
a6d5a51c
TH
2902 continue;
2903
3373efd8 2904 ata_dev_xfermask(dev);
1da177e4 2905
acf356b1
TH
2906 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
2907 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
2908 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
2909 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 2910
4f65977d 2911 found = 1;
5444a6f4
AC
2912 if (dev->dma_mode)
2913 used_dma = 1;
a6d5a51c 2914 }
4f65977d 2915 if (!found)
e82cbdb9 2916 goto out;
a6d5a51c
TH
2917
2918 /* step 2: always set host PIO timings */
f58229f8 2919 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2920 if (!ata_dev_enabled(dev))
2921 continue;
2922
2923 if (!dev->pio_mode) {
f15a1daf 2924 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 2925 rc = -EINVAL;
e82cbdb9 2926 goto out;
e8e0619f
TH
2927 }
2928
2929 dev->xfer_mode = dev->pio_mode;
2930 dev->xfer_shift = ATA_SHIFT_PIO;
2931 if (ap->ops->set_piomode)
2932 ap->ops->set_piomode(ap, dev);
2933 }
1da177e4 2934
a6d5a51c 2935 /* step 3: set host DMA timings */
f58229f8 2936 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
2937 if (!ata_dev_enabled(dev) || !dev->dma_mode)
2938 continue;
2939
2940 dev->xfer_mode = dev->dma_mode;
2941 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
2942 if (ap->ops->set_dmamode)
2943 ap->ops->set_dmamode(ap, dev);
2944 }
1da177e4
LT
2945
2946 /* step 4: update devices' xfer mode */
f58229f8 2947 ata_link_for_each_dev(dev, link) {
18d90deb 2948 /* don't update suspended devices' xfer mode */
9666f400 2949 if (!ata_dev_enabled(dev))
83206a29
TH
2950 continue;
2951
3373efd8 2952 rc = ata_dev_set_mode(dev);
5bbc53f4 2953 if (rc)
e82cbdb9 2954 goto out;
83206a29 2955 }
1da177e4 2956
e8e0619f
TH
2957 /* Record simplex status. If we selected DMA then the other
2958 * host channels are not permitted to do so.
5444a6f4 2959 */
cca3974e 2960 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 2961 ap->host->simplex_claimed = ap;
5444a6f4 2962
e82cbdb9
TH
2963 out:
2964 if (rc)
2965 *r_failed_dev = dev;
2966 return rc;
1da177e4
LT
2967}
2968
04351821
AC
2969/**
2970 * ata_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 2971 * @link: link on which timings will be programmed
04351821
AC
2972 * @r_failed_dev: out paramter for failed device
2973 *
2974 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.). If
2975 * ata_set_mode() fails, pointer to the failing device is
2976 * returned in @r_failed_dev.
2977 *
2978 * LOCKING:
2979 * PCI/etc. bus probe sem.
2980 *
2981 * RETURNS:
2982 * 0 on success, negative errno otherwise
2983 */
0260731f 2984int ata_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
04351821 2985{
0260731f
TH
2986 struct ata_port *ap = link->ap;
2987
04351821
AC
2988 /* has private set_mode? */
2989 if (ap->ops->set_mode)
0260731f
TH
2990 return ap->ops->set_mode(link, r_failed_dev);
2991 return ata_do_set_mode(link, r_failed_dev);
04351821
AC
2992}
2993
1fdffbce
JG
2994/**
2995 * ata_tf_to_host - issue ATA taskfile to host controller
2996 * @ap: port to which command is being issued
2997 * @tf: ATA taskfile register set
2998 *
2999 * Issues ATA taskfile register set to ATA host controller,
3000 * with proper synchronization with interrupt handler and
3001 * other threads.
3002 *
3003 * LOCKING:
cca3974e 3004 * spin_lock_irqsave(host lock)
1fdffbce
JG
3005 */
3006
3007static inline void ata_tf_to_host(struct ata_port *ap,
3008 const struct ata_taskfile *tf)
3009{
3010 ap->ops->tf_load(ap, tf);
3011 ap->ops->exec_command(ap, tf);
3012}
3013
1da177e4
LT
3014/**
3015 * ata_busy_sleep - sleep until BSY clears, or timeout
3016 * @ap: port containing status register to be polled
3017 * @tmout_pat: impatience timeout
3018 * @tmout: overall timeout
3019 *
780a87f7
JG
3020 * Sleep until ATA Status register bit BSY clears,
3021 * or a timeout occurs.
3022 *
d1adc1bb
TH
3023 * LOCKING:
3024 * Kernel thread context (may sleep).
3025 *
3026 * RETURNS:
3027 * 0 on success, -errno otherwise.
1da177e4 3028 */
d1adc1bb
TH
3029int ata_busy_sleep(struct ata_port *ap,
3030 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
3031{
3032 unsigned long timer_start, timeout;
3033 u8 status;
3034
3035 status = ata_busy_wait(ap, ATA_BUSY, 300);
3036 timer_start = jiffies;
3037 timeout = timer_start + tmout_pat;
d1adc1bb
TH
3038 while (status != 0xff && (status & ATA_BUSY) &&
3039 time_before(jiffies, timeout)) {
1da177e4
LT
3040 msleep(50);
3041 status = ata_busy_wait(ap, ATA_BUSY, 3);
3042 }
3043
d1adc1bb 3044 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 3045 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
3046 "port is slow to respond, please be patient "
3047 "(Status 0x%x)\n", status);
1da177e4
LT
3048
3049 timeout = timer_start + tmout;
d1adc1bb
TH
3050 while (status != 0xff && (status & ATA_BUSY) &&
3051 time_before(jiffies, timeout)) {
1da177e4
LT
3052 msleep(50);
3053 status = ata_chk_status(ap);
3054 }
3055
d1adc1bb
TH
3056 if (status == 0xff)
3057 return -ENODEV;
3058
1da177e4 3059 if (status & ATA_BUSY) {
f15a1daf 3060 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3061 "(%lu secs, Status 0x%x)\n",
3062 tmout / HZ, status);
d1adc1bb 3063 return -EBUSY;
1da177e4
LT
3064 }
3065
3066 return 0;
3067}
3068
d4b2bab4
TH
3069/**
3070 * ata_wait_ready - sleep until BSY clears, or timeout
3071 * @ap: port containing status register to be polled
3072 * @deadline: deadline jiffies for the operation
3073 *
3074 * Sleep until ATA Status register bit BSY clears, or timeout
3075 * occurs.
3076 *
3077 * LOCKING:
3078 * Kernel thread context (may sleep).
3079 *
3080 * RETURNS:
3081 * 0 on success, -errno otherwise.
3082 */
3083int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3084{
3085 unsigned long start = jiffies;
3086 int warned = 0;
3087
3088 while (1) {
3089 u8 status = ata_chk_status(ap);
3090 unsigned long now = jiffies;
3091
3092 if (!(status & ATA_BUSY))
3093 return 0;
936fd732 3094 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3095 return -ENODEV;
3096 if (time_after(now, deadline))
3097 return -EBUSY;
3098
3099 if (!warned && time_after(now, start + 5 * HZ) &&
3100 (deadline - now > 3 * HZ)) {
3101 ata_port_printk(ap, KERN_WARNING,
3102 "port is slow to respond, please be patient "
3103 "(Status 0x%x)\n", status);
3104 warned = 1;
3105 }
3106
3107 msleep(50);
3108 }
3109}
3110
3111static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3112 unsigned long deadline)
1da177e4
LT
3113{
3114 struct ata_ioports *ioaddr = &ap->ioaddr;
3115 unsigned int dev0 = devmask & (1 << 0);
3116 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3117 int rc, ret = 0;
1da177e4
LT
3118
3119 /* if device 0 was found in ata_devchk, wait for its
3120 * BSY bit to clear
3121 */
d4b2bab4
TH
3122 if (dev0) {
3123 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3124 if (rc) {
3125 if (rc != -ENODEV)
3126 return rc;
3127 ret = rc;
3128 }
d4b2bab4 3129 }
1da177e4 3130
e141d999
TH
3131 /* if device 1 was found in ata_devchk, wait for register
3132 * access briefly, then wait for BSY to clear.
1da177e4 3133 */
e141d999
TH
3134 if (dev1) {
3135 int i;
1da177e4
LT
3136
3137 ap->ops->dev_select(ap, 1);
e141d999
TH
3138
3139 /* Wait for register access. Some ATAPI devices fail
3140 * to set nsect/lbal after reset, so don't waste too
3141 * much time on it. We're gonna wait for !BSY anyway.
3142 */
3143 for (i = 0; i < 2; i++) {
3144 u8 nsect, lbal;
3145
3146 nsect = ioread8(ioaddr->nsect_addr);
3147 lbal = ioread8(ioaddr->lbal_addr);
3148 if ((nsect == 1) && (lbal == 1))
3149 break;
3150 msleep(50); /* give drive a breather */
3151 }
3152
d4b2bab4 3153 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3154 if (rc) {
3155 if (rc != -ENODEV)
3156 return rc;
3157 ret = rc;
3158 }
d4b2bab4 3159 }
1da177e4
LT
3160
3161 /* is all this really necessary? */
3162 ap->ops->dev_select(ap, 0);
3163 if (dev1)
3164 ap->ops->dev_select(ap, 1);
3165 if (dev0)
3166 ap->ops->dev_select(ap, 0);
d4b2bab4 3167
9b89391c 3168 return ret;
1da177e4
LT
3169}
3170
d4b2bab4
TH
3171static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3172 unsigned long deadline)
1da177e4
LT
3173{
3174 struct ata_ioports *ioaddr = &ap->ioaddr;
3175
44877b4e 3176 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3177
3178 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3179 iowrite8(ap->ctl, ioaddr->ctl_addr);
3180 udelay(20); /* FIXME: flush */
3181 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3182 udelay(20); /* FIXME: flush */
3183 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3184
3185 /* spec mandates ">= 2ms" before checking status.
3186 * We wait 150ms, because that was the magic delay used for
3187 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
3188 * between when the ATA command register is written, and then
3189 * status is checked. Because waiting for "a while" before
3190 * checking status is fine, post SRST, we perform this magic
3191 * delay here as well.
09c7ad79
AC
3192 *
3193 * Old drivers/ide uses the 2mS rule and then waits for ready
1da177e4
LT
3194 */
3195 msleep(150);
3196
2e9edbf8 3197 /* Before we perform post reset processing we want to see if
298a41ca
TH
3198 * the bus shows 0xFF because the odd clown forgets the D7
3199 * pulldown resistor.
3200 */
d1adc1bb 3201 if (ata_check_status(ap) == 0xFF)
9b89391c 3202 return -ENODEV;
09c7ad79 3203
d4b2bab4 3204 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3205}
3206
3207/**
3208 * ata_bus_reset - reset host port and associated ATA channel
3209 * @ap: port to reset
3210 *
3211 * This is typically the first time we actually start issuing
3212 * commands to the ATA channel. We wait for BSY to clear, then
3213 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3214 * result. Determine what devices, if any, are on the channel
3215 * by looking at the device 0/1 error register. Look at the signature
3216 * stored in each device's taskfile registers, to determine if
3217 * the device is ATA or ATAPI.
3218 *
3219 * LOCKING:
0cba632b 3220 * PCI/etc. bus probe sem.
cca3974e 3221 * Obtains host lock.
1da177e4
LT
3222 *
3223 * SIDE EFFECTS:
198e0fed 3224 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3225 */
3226
3227void ata_bus_reset(struct ata_port *ap)
3228{
9af5c9c9 3229 struct ata_device *device = ap->link.device;
1da177e4
LT
3230 struct ata_ioports *ioaddr = &ap->ioaddr;
3231 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3232 u8 err;
aec5c3c1 3233 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3234 int rc;
1da177e4 3235
44877b4e 3236 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3237
3238 /* determine if device 0/1 are present */
3239 if (ap->flags & ATA_FLAG_SATA_RESET)
3240 dev0 = 1;
3241 else {
3242 dev0 = ata_devchk(ap, 0);
3243 if (slave_possible)
3244 dev1 = ata_devchk(ap, 1);
3245 }
3246
3247 if (dev0)
3248 devmask |= (1 << 0);
3249 if (dev1)
3250 devmask |= (1 << 1);
3251
3252 /* select device 0 again */
3253 ap->ops->dev_select(ap, 0);
3254
3255 /* issue bus reset */
9b89391c
TH
3256 if (ap->flags & ATA_FLAG_SRST) {
3257 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3258 if (rc && rc != -ENODEV)
aec5c3c1 3259 goto err_out;
9b89391c 3260 }
1da177e4
LT
3261
3262 /*
3263 * determine by signature whether we have ATA or ATAPI devices
3264 */
3f19859e 3265 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3266 if ((slave_possible) && (err != 0x81))
3f19859e 3267 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3268
1da177e4 3269 /* is double-select really necessary? */
9af5c9c9 3270 if (device[1].class != ATA_DEV_NONE)
1da177e4 3271 ap->ops->dev_select(ap, 1);
9af5c9c9 3272 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3273 ap->ops->dev_select(ap, 0);
3274
3275 /* if no devices were detected, disable this port */
9af5c9c9
TH
3276 if ((device[0].class == ATA_DEV_NONE) &&
3277 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3278 goto err_out;
3279
3280 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3281 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3282 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3283 }
3284
3285 DPRINTK("EXIT\n");
3286 return;
3287
3288err_out:
f15a1daf 3289 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3290 ata_port_disable(ap);
1da177e4
LT
3291
3292 DPRINTK("EXIT\n");
3293}
3294
d7bb4cc7 3295/**
936fd732
TH
3296 * sata_link_debounce - debounce SATA phy status
3297 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3298 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3299 * @deadline: deadline jiffies for the operation
d7bb4cc7 3300 *
936fd732 3301* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3302 * holding the same value where DET is not 1 for @duration polled
3303 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3304 * beginning of the stable state. Because DET gets stuck at 1 on
3305 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3306 * until timeout then returns 0 if DET is stable at 1.
3307 *
d4b2bab4
TH
3308 * @timeout is further limited by @deadline. The sooner of the
3309 * two is used.
3310 *
d7bb4cc7
TH
3311 * LOCKING:
3312 * Kernel thread context (may sleep)
3313 *
3314 * RETURNS:
3315 * 0 on success, -errno on failure.
3316 */
936fd732
TH
3317int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3318 unsigned long deadline)
7a7921e8 3319{
d7bb4cc7 3320 unsigned long interval_msec = params[0];
d4b2bab4
TH
3321 unsigned long duration = msecs_to_jiffies(params[1]);
3322 unsigned long last_jiffies, t;
d7bb4cc7
TH
3323 u32 last, cur;
3324 int rc;
3325
d4b2bab4
TH
3326 t = jiffies + msecs_to_jiffies(params[2]);
3327 if (time_before(t, deadline))
3328 deadline = t;
3329
936fd732 3330 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3331 return rc;
3332 cur &= 0xf;
3333
3334 last = cur;
3335 last_jiffies = jiffies;
3336
3337 while (1) {
3338 msleep(interval_msec);
936fd732 3339 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3340 return rc;
3341 cur &= 0xf;
3342
3343 /* DET stable? */
3344 if (cur == last) {
d4b2bab4 3345 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3346 continue;
3347 if (time_after(jiffies, last_jiffies + duration))
3348 return 0;
3349 continue;
3350 }
3351
3352 /* unstable, start over */
3353 last = cur;
3354 last_jiffies = jiffies;
3355
f1545154
TH
3356 /* Check deadline. If debouncing failed, return
3357 * -EPIPE to tell upper layer to lower link speed.
3358 */
d4b2bab4 3359 if (time_after(jiffies, deadline))
f1545154 3360 return -EPIPE;
d7bb4cc7
TH
3361 }
3362}
3363
3364/**
936fd732
TH
3365 * sata_link_resume - resume SATA link
3366 * @link: ATA link to resume SATA
d7bb4cc7 3367 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3368 * @deadline: deadline jiffies for the operation
d7bb4cc7 3369 *
936fd732 3370 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3371 *
3372 * LOCKING:
3373 * Kernel thread context (may sleep)
3374 *
3375 * RETURNS:
3376 * 0 on success, -errno on failure.
3377 */
936fd732
TH
3378int sata_link_resume(struct ata_link *link, const unsigned long *params,
3379 unsigned long deadline)
d7bb4cc7
TH
3380{
3381 u32 scontrol;
81952c54
TH
3382 int rc;
3383
936fd732 3384 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3385 return rc;
7a7921e8 3386
852ee16a 3387 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3388
936fd732 3389 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3390 return rc;
7a7921e8 3391
d7bb4cc7
TH
3392 /* Some PHYs react badly if SStatus is pounded immediately
3393 * after resuming. Delay 200ms before debouncing.
3394 */
3395 msleep(200);
7a7921e8 3396
936fd732 3397 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3398}
3399
f5914a46
TH
3400/**
3401 * ata_std_prereset - prepare for reset
cc0680a5 3402 * @link: ATA link to be reset
d4b2bab4 3403 * @deadline: deadline jiffies for the operation
f5914a46 3404 *
cc0680a5 3405 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3406 * prereset makes libata abort whole reset sequence and give up
3407 * that port, so prereset should be best-effort. It does its
3408 * best to prepare for reset sequence but if things go wrong, it
3409 * should just whine, not fail.
f5914a46
TH
3410 *
3411 * LOCKING:
3412 * Kernel thread context (may sleep)
3413 *
3414 * RETURNS:
3415 * 0 on success, -errno otherwise.
3416 */
cc0680a5 3417int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3418{
cc0680a5 3419 struct ata_port *ap = link->ap;
936fd732 3420 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3421 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3422 int rc;
3423
31daabda 3424 /* handle link resume */
28324304 3425 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3426 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3427 ehc->i.action |= ATA_EH_HARDRESET;
3428
f5914a46
TH
3429 /* if we're about to do hardreset, nothing more to do */
3430 if (ehc->i.action & ATA_EH_HARDRESET)
3431 return 0;
3432
936fd732 3433 /* if SATA, resume link */
a16abc0b 3434 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3435 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3436 /* whine about phy resume failure but proceed */
3437 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3438 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3439 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3440 }
3441
3442 /* Wait for !BSY if the controller can wait for the first D2H
3443 * Reg FIS and we don't know that no device is attached.
3444 */
0c88758b 3445 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3446 rc = ata_wait_ready(ap, deadline);
6dffaf61 3447 if (rc && rc != -ENODEV) {
cc0680a5 3448 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3449 "(errno=%d), forcing hardreset\n", rc);
3450 ehc->i.action |= ATA_EH_HARDRESET;
3451 }
3452 }
f5914a46
TH
3453
3454 return 0;
3455}
3456
c2bd5804
TH
3457/**
3458 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3459 * @link: ATA link to reset
c2bd5804 3460 * @classes: resulting classes of attached devices
d4b2bab4 3461 * @deadline: deadline jiffies for the operation
c2bd5804 3462 *
52783c5d 3463 * Reset host port using ATA SRST.
c2bd5804
TH
3464 *
3465 * LOCKING:
3466 * Kernel thread context (may sleep)
3467 *
3468 * RETURNS:
3469 * 0 on success, -errno otherwise.
3470 */
cc0680a5 3471int ata_std_softreset(struct ata_link *link, unsigned int *classes,
d4b2bab4 3472 unsigned long deadline)
c2bd5804 3473{
cc0680a5 3474 struct ata_port *ap = link->ap;
c2bd5804 3475 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3476 unsigned int devmask = 0;
3477 int rc;
c2bd5804
TH
3478 u8 err;
3479
3480 DPRINTK("ENTER\n");
3481
936fd732 3482 if (ata_link_offline(link)) {
3a39746a
TH
3483 classes[0] = ATA_DEV_NONE;
3484 goto out;
3485 }
3486
c2bd5804
TH
3487 /* determine if device 0/1 are present */
3488 if (ata_devchk(ap, 0))
3489 devmask |= (1 << 0);
3490 if (slave_possible && ata_devchk(ap, 1))
3491 devmask |= (1 << 1);
3492
c2bd5804
TH
3493 /* select device 0 again */
3494 ap->ops->dev_select(ap, 0);
3495
3496 /* issue bus reset */
3497 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3498 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c 3499 /* if link is occupied, -ENODEV too is an error */
936fd732 3500 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
cc0680a5 3501 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
d4b2bab4 3502 return rc;
c2bd5804
TH
3503 }
3504
3505 /* determine by signature whether we have ATA or ATAPI devices */
3f19859e
TH
3506 classes[0] = ata_dev_try_classify(&link->device[0],
3507 devmask & (1 << 0), &err);
c2bd5804 3508 if (slave_possible && err != 0x81)
3f19859e
TH
3509 classes[1] = ata_dev_try_classify(&link->device[1],
3510 devmask & (1 << 1), &err);
c2bd5804 3511
3a39746a 3512 out:
c2bd5804
TH
3513 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3514 return 0;
3515}
3516
3517/**
cc0680a5
TH
3518 * sata_link_hardreset - reset link via SATA phy reset
3519 * @link: link to reset
b6103f6d 3520 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3521 * @deadline: deadline jiffies for the operation
c2bd5804 3522 *
cc0680a5 3523 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
3524 *
3525 * LOCKING:
3526 * Kernel thread context (may sleep)
3527 *
3528 * RETURNS:
3529 * 0 on success, -errno otherwise.
3530 */
cc0680a5 3531int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 3532 unsigned long deadline)
c2bd5804 3533{
852ee16a 3534 u32 scontrol;
81952c54 3535 int rc;
852ee16a 3536
c2bd5804
TH
3537 DPRINTK("ENTER\n");
3538
936fd732 3539 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3540 /* SATA spec says nothing about how to reconfigure
3541 * spd. To be on the safe side, turn off phy during
3542 * reconfiguration. This works for at least ICH7 AHCI
3543 * and Sil3124.
3544 */
936fd732 3545 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3546 goto out;
81952c54 3547
a34b6fc0 3548 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3549
936fd732 3550 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3551 goto out;
1c3fae4d 3552
936fd732 3553 sata_set_spd(link);
1c3fae4d
TH
3554 }
3555
3556 /* issue phy wake/reset */
936fd732 3557 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3558 goto out;
81952c54 3559
852ee16a 3560 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3561
936fd732 3562 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3563 goto out;
c2bd5804 3564
1c3fae4d 3565 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3566 * 10.4.2 says at least 1 ms.
3567 */
3568 msleep(1);
3569
936fd732
TH
3570 /* bring link back */
3571 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
3572 out:
3573 DPRINTK("EXIT, rc=%d\n", rc);
3574 return rc;
3575}
3576
3577/**
3578 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 3579 * @link: link to reset
b6103f6d 3580 * @class: resulting class of attached device
d4b2bab4 3581 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3582 *
3583 * SATA phy-reset host port using DET bits of SControl register,
3584 * wait for !BSY and classify the attached device.
3585 *
3586 * LOCKING:
3587 * Kernel thread context (may sleep)
3588 *
3589 * RETURNS:
3590 * 0 on success, -errno otherwise.
3591 */
cc0680a5 3592int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 3593 unsigned long deadline)
b6103f6d 3594{
cc0680a5 3595 struct ata_port *ap = link->ap;
936fd732 3596 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
3597 int rc;
3598
3599 DPRINTK("ENTER\n");
3600
3601 /* do hardreset */
cc0680a5 3602 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 3603 if (rc) {
cc0680a5 3604 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
3605 "COMRESET failed (errno=%d)\n", rc);
3606 return rc;
3607 }
c2bd5804 3608
c2bd5804 3609 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 3610 if (ata_link_offline(link)) {
c2bd5804
TH
3611 *class = ATA_DEV_NONE;
3612 DPRINTK("EXIT, link offline\n");
3613 return 0;
3614 }
3615
34fee227
TH
3616 /* wait a while before checking status, see SRST for more info */
3617 msleep(150);
3618
d4b2bab4 3619 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3620 /* link occupied, -ENODEV too is an error */
3621 if (rc) {
cc0680a5 3622 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
3623 "COMRESET failed (errno=%d)\n", rc);
3624 return rc;
c2bd5804
TH
3625 }
3626
3a39746a
TH
3627 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3628
3f19859e 3629 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
3630
3631 DPRINTK("EXIT, class=%u\n", *class);
3632 return 0;
3633}
3634
3635/**
3636 * ata_std_postreset - standard postreset callback
cc0680a5 3637 * @link: the target ata_link
c2bd5804
TH
3638 * @classes: classes of attached devices
3639 *
3640 * This function is invoked after a successful reset. Note that
3641 * the device might have been reset more than once using
3642 * different reset methods before postreset is invoked.
c2bd5804 3643 *
c2bd5804
TH
3644 * LOCKING:
3645 * Kernel thread context (may sleep)
3646 */
cc0680a5 3647void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3648{
cc0680a5 3649 struct ata_port *ap = link->ap;
dc2b3515
TH
3650 u32 serror;
3651
c2bd5804
TH
3652 DPRINTK("ENTER\n");
3653
c2bd5804 3654 /* print link status */
936fd732 3655 sata_print_link_status(link);
c2bd5804 3656
dc2b3515 3657 /* clear SError */
936fd732
TH
3658 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3659 sata_scr_write(link, SCR_ERROR, serror);
dc2b3515 3660
c2bd5804
TH
3661 /* is double-select really necessary? */
3662 if (classes[0] != ATA_DEV_NONE)
3663 ap->ops->dev_select(ap, 1);
3664 if (classes[1] != ATA_DEV_NONE)
3665 ap->ops->dev_select(ap, 0);
3666
3a39746a
TH
3667 /* bail out if no device is present */
3668 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3669 DPRINTK("EXIT, no device\n");
3670 return;
3671 }
3672
3673 /* set up device control */
0d5ff566
TH
3674 if (ap->ioaddr.ctl_addr)
3675 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3676
3677 DPRINTK("EXIT\n");
3678}
3679
623a3128
TH
3680/**
3681 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3682 * @dev: device to compare against
3683 * @new_class: class of the new device
3684 * @new_id: IDENTIFY page of the new device
3685 *
3686 * Compare @new_class and @new_id against @dev and determine
3687 * whether @dev is the device indicated by @new_class and
3688 * @new_id.
3689 *
3690 * LOCKING:
3691 * None.
3692 *
3693 * RETURNS:
3694 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3695 */
3373efd8
TH
3696static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3697 const u16 *new_id)
623a3128
TH
3698{
3699 const u16 *old_id = dev->id;
a0cf733b
TH
3700 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3701 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3702
3703 if (dev->class != new_class) {
f15a1daf
TH
3704 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3705 dev->class, new_class);
623a3128
TH
3706 return 0;
3707 }
3708
a0cf733b
TH
3709 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3710 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3711 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3712 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3713
3714 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3715 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3716 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3717 return 0;
3718 }
3719
3720 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3721 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3722 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3723 return 0;
3724 }
3725
623a3128
TH
3726 return 1;
3727}
3728
3729/**
fe30911b 3730 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3731 * @dev: target ATA device
bff04647 3732 * @readid_flags: read ID flags
623a3128
TH
3733 *
3734 * Re-read IDENTIFY page and make sure @dev is still attached to
3735 * the port.
3736 *
3737 * LOCKING:
3738 * Kernel thread context (may sleep)
3739 *
3740 * RETURNS:
3741 * 0 on success, negative errno otherwise
3742 */
fe30911b 3743int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3744{
5eb45c02 3745 unsigned int class = dev->class;
9af5c9c9 3746 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3747 int rc;
3748
fe635c7e 3749 /* read ID data */
bff04647 3750 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3751 if (rc)
fe30911b 3752 return rc;
623a3128
TH
3753
3754 /* is the device still there? */
fe30911b
TH
3755 if (!ata_dev_same_device(dev, class, id))
3756 return -ENODEV;
623a3128 3757
fe635c7e 3758 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3759 return 0;
3760}
3761
3762/**
3763 * ata_dev_revalidate - Revalidate ATA device
3764 * @dev: device to revalidate
422c9daa 3765 * @new_class: new class code
fe30911b
TH
3766 * @readid_flags: read ID flags
3767 *
3768 * Re-read IDENTIFY page, make sure @dev is still attached to the
3769 * port and reconfigure it according to the new IDENTIFY page.
3770 *
3771 * LOCKING:
3772 * Kernel thread context (may sleep)
3773 *
3774 * RETURNS:
3775 * 0 on success, negative errno otherwise
3776 */
422c9daa
TH
3777int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3778 unsigned int readid_flags)
fe30911b 3779{
6ddcd3b0 3780 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3781 int rc;
3782
3783 if (!ata_dev_enabled(dev))
3784 return -ENODEV;
3785
422c9daa
TH
3786 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3787 if (ata_class_enabled(new_class) &&
3788 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3789 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3790 dev->class, new_class);
3791 rc = -ENODEV;
3792 goto fail;
3793 }
3794
fe30911b
TH
3795 /* re-read ID */
3796 rc = ata_dev_reread_id(dev, readid_flags);
3797 if (rc)
3798 goto fail;
623a3128
TH
3799
3800 /* configure device according to the new ID */
efdaedc4 3801 rc = ata_dev_configure(dev);
6ddcd3b0
TH
3802 if (rc)
3803 goto fail;
3804
3805 /* verify n_sectors hasn't changed */
b54eebd6
TH
3806 if (dev->class == ATA_DEV_ATA && n_sectors &&
3807 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
3808 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
3809 "%llu != %llu\n",
3810 (unsigned long long)n_sectors,
3811 (unsigned long long)dev->n_sectors);
8270bec4
TH
3812
3813 /* restore original n_sectors */
3814 dev->n_sectors = n_sectors;
3815
6ddcd3b0
TH
3816 rc = -ENODEV;
3817 goto fail;
3818 }
3819
3820 return 0;
623a3128
TH
3821
3822 fail:
f15a1daf 3823 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
3824 return rc;
3825}
3826
6919a0a6
AC
3827struct ata_blacklist_entry {
3828 const char *model_num;
3829 const char *model_rev;
3830 unsigned long horkage;
3831};
3832
3833static const struct ata_blacklist_entry ata_device_blacklist [] = {
3834 /* Devices with DMA related problems under Linux */
3835 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
3836 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
3837 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
3838 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
3839 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
3840 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
3841 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
3842 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
3843 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
3844 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
3845 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
3846 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
3847 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
3848 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
3849 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
3850 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
3851 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
3852 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
3853 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
3854 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
3855 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
3856 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
3857 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
3858 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
3859 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
3860 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
3861 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
3862 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
3863 { "SAMSUNG CD-ROM SN-124","N001", ATA_HORKAGE_NODMA },
39f19886 3864 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
5acd50f6 3865 { "IOMEGA ZIP 250 ATAPI", NULL, ATA_HORKAGE_NODMA }, /* temporary fix */
39ce7128
TH
3866 { "IOMEGA ZIP 250 ATAPI Floppy",
3867 NULL, ATA_HORKAGE_NODMA },
6919a0a6 3868
18d6e9d5 3869 /* Weird ATAPI devices */
40a1d531 3870 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 3871
6919a0a6
AC
3872 /* Devices we expect to fail diagnostics */
3873
3874 /* Devices where NCQ should be avoided */
3875 /* NCQ is slow */
3876 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
09125ea6
TH
3877 /* http://thread.gmane.org/gmane.linux.ide/14907 */
3878 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 3879 /* NCQ is broken */
539cc7c7 3880 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 3881 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
0b0a43e0
DM
3882 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
3883 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
539cc7c7 3884
36e337d0
RH
3885 /* Blacklist entries taken from Silicon Image 3124/3132
3886 Windows driver .inf file - also several Linux problem reports */
3887 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
3888 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
3889 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
bd9c5a39
TH
3890 /* Drives which do spurious command completion */
3891 { "HTS541680J9SA00", "SB2IC7EP", ATA_HORKAGE_NONCQ, },
2f8fcebb 3892 { "HTS541612J9SA00", "SBDIC7JP", ATA_HORKAGE_NONCQ, },
e14cbfa6 3893 { "Hitachi HTS541616J9SA00", "SB4OC70P", ATA_HORKAGE_NONCQ, },
2f8fcebb 3894 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
7f567620 3895 { "WDC WD3200AAJS-00RYA0", "12.01B01", ATA_HORKAGE_NONCQ, },
a520f261 3896 { "FUJITSU MHV2080BH", "00840028", ATA_HORKAGE_NONCQ, },
7f567620 3897 { "ST9120822AS", "3.CLF", ATA_HORKAGE_NONCQ, },
3fb6589c 3898 { "ST9160821AS", "3.CLF", ATA_HORKAGE_NONCQ, },
7f567620
TH
3899 { "ST3160812AS", "3.ADJ", ATA_HORKAGE_NONCQ, },
3900 { "ST980813AS", "3.ADB", ATA_HORKAGE_NONCQ, },
5d6aca8d 3901 { "SAMSUNG HD401LJ", "ZZ100-15", ATA_HORKAGE_NONCQ, },
6919a0a6 3902
16c55b03
TH
3903 /* devices which puke on READ_NATIVE_MAX */
3904 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
3905 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
3906 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
3907 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 3908
93328e11
AC
3909 /* Devices which report 1 sector over size HPA */
3910 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
3911 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
3912
6919a0a6
AC
3913 /* End Marker */
3914 { }
1da177e4 3915};
2e9edbf8 3916
/**
 *	strn_pattern_cmp - compare a name against a pattern with an
 *	optional trailing wildcard
 *	@patt: pattern, optionally ending in @wildchar
 *	@name: string to match
 *	@wildchar: wildcard character (e.g. '*')
 *
 *	If @patt ends with @wildchar, only the prefix before the
 *	wildcard is compared; otherwise an exact match is required.
 *
 *	RETURNS:
 *	0 on match, non-zero otherwise (strcmp/strncmp convention).
 */
int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *p;

	/*
	 * check for trailing wildcard: *\0
	 */
	p = strchr(patt, wildchar);
	if (p && p[1] == '\0')
		return strncmp(patt, name, p - patt);

	/* No wildcard: require an exact match.  The previous code
	 * compared only strlen(name) characters, which let a longer
	 * pattern spuriously match a shorter name that was merely a
	 * prefix of it (e.g. pattern "WDC AC11000H" vs name "WDC").
	 */
	return strcmp(patt, name);
}
3933
75683fe7 3934static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 3935{
8bfa79fc
TH
3936 unsigned char model_num[ATA_ID_PROD_LEN + 1];
3937 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 3938 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 3939
8bfa79fc
TH
3940 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
3941 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 3942
6919a0a6 3943 while (ad->model_num) {
539cc7c7 3944 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
3945 if (ad->model_rev == NULL)
3946 return ad->horkage;
539cc7c7 3947 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 3948 return ad->horkage;
f4b15fef 3949 }
6919a0a6 3950 ad++;
f4b15fef 3951 }
1da177e4
LT
3952 return 0;
3953}
3954
6919a0a6
AC
3955static int ata_dma_blacklisted(const struct ata_device *dev)
3956{
3957 /* We don't support polling DMA.
3958 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
3959 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
3960 */
9af5c9c9 3961 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
3962 (dev->flags & ATA_DFLAG_CDB_INTR))
3963 return 1;
75683fe7 3964 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
3965}
3966
a6d5a51c
TH
3967/**
3968 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
3969 * @dev: Device to compute xfermask for
3970 *
acf356b1
TH
3971 * Compute supported xfermask of @dev and store it in
3972 * dev->*_mask. This function is responsible for applying all
3973 * known limits including host controller limits, device
3974 * blacklist, etc...
a6d5a51c
TH
3975 *
3976 * LOCKING:
3977 * None.
a6d5a51c 3978 */
static void ata_dev_xfermask(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	struct ata_host *host = ap->host;
	unsigned long xfer_mask;

	/* controller modes available */
	xfer_mask = ata_pack_xfermask(ap->pio_mask,
				      ap->mwdma_mask, ap->udma_mask);

	/* drive modes available: intersect with what the device itself
	 * claims, both from our cached masks and its IDENTIFY data */
	xfer_mask &= ata_pack_xfermask(dev->pio_mask,
				       dev->mwdma_mask, dev->udma_mask);
	xfer_mask &= ata_id_xfermask(dev->id);

	/*
	 *	CFA Advanced TrueIDE timings are not allowed on a shared
	 *	cable
	 */
	if (ata_dev_pair(dev)) {
		/* No PIO5 or PIO6 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
		/* No MWDMA3 or MWDMA 4 */
		xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
	}

	/* drop all DMA modes for blacklisted devices */
	if (ata_dma_blacklisted(dev)) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING,
			       "device is on DMA blacklist, disabling DMA\n");
	}

	/* on simplex hosts only one port may use DMA at a time; if
	 * another port already claimed it, this device goes PIO-only */
	if ((host->flags & ATA_HOST_SIMPLEX) &&
	    host->simplex_claimed && host->simplex_claimed != ap) {
		xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
		ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
			       "other device, disabling DMA\n");
	}

	if (ap->flags & ATA_FLAG_NO_IORDY)
		xfer_mask &= ata_pio_mask_no_iordy(dev);

	/* give the LLDD a chance to veto individual modes */
	if (ap->ops->mode_filter)
		xfer_mask = ap->ops->mode_filter(dev, xfer_mask);

	/* Apply cable rule here.  Don't apply it early because when
	 * we handle hot plug the cable type can itself change.
	 * Check this last so that we know if the transfer rate was
	 * solely limited by the cable.
	 * Unknown or 80 wire cables reported host side are checked
	 * drive side as well. Cases where we know a 40wire cable
	 * is used safely for 80 are not checked here.
	 */
	if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
		/* UDMA/44 or higher would be available */
		if((ap->cbl == ATA_CBL_PATA40) ||
		    (ata_drive_40wire(dev->id) &&
		     (ap->cbl == ATA_CBL_PATA_UNK ||
		     ap->cbl == ATA_CBL_PATA80))) {
			ata_dev_printk(dev, KERN_WARNING,
				 "limited to UDMA/33 due to 40-wire cable\n");
			xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
		}

	/* write the final masks back into the device */
	ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
			    &dev->mwdma_mask, &dev->udma_mask);
}
4047
1da177e4
LT
4048/**
4049 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4050 * @dev: Device to which command will be sent
4051 *
780a87f7
JG
4052 * Issue SET FEATURES - XFER MODE command to device @dev
4053 * on port @ap.
4054 *
1da177e4 4055 * LOCKING:
0cba632b 4056 * PCI/etc. bus probe sem.
83206a29
TH
4057 *
4058 * RETURNS:
4059 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4060 */
4061
3373efd8 4062static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4063{
a0123703 4064 struct ata_taskfile tf;
83206a29 4065 unsigned int err_mask;
1da177e4
LT
4066
4067 /* set up set-features taskfile */
4068 DPRINTK("set features - xfer mode\n");
4069
464cf177
TH
4070 /* Some controllers and ATAPI devices show flaky interrupt
4071 * behavior after setting xfer mode. Use polling instead.
4072 */
3373efd8 4073 ata_tf_init(dev, &tf);
a0123703
TH
4074 tf.command = ATA_CMD_SET_FEATURES;
4075 tf.feature = SETFEATURES_XFER;
464cf177 4076 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703
TH
4077 tf.protocol = ATA_PROT_NODATA;
4078 tf.nsect = dev->xfer_mode;
1da177e4 4079
3373efd8 4080 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
9f45cbd3
KCA
4081
4082 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4083 return err_mask;
4084}
4085
4086/**
4087 * ata_dev_set_AN - Issue SET FEATURES - SATA FEATURES
4088 * @dev: Device to which command will be sent
4089 * @enable: Whether to enable or disable the feature
4090 *
4091 * Issue SET FEATURES - SATA FEATURES command to device @dev
4092 * on port @ap with sector count set to indicate Asynchronous
4093 * Notification feature
4094 *
4095 * LOCKING:
4096 * PCI/etc. bus probe sem.
4097 *
4098 * RETURNS:
4099 * 0 on success, AC_ERR_* mask otherwise.
4100 */
4101static unsigned int ata_dev_set_AN(struct ata_device *dev, u8 enable)
4102{
4103 struct ata_taskfile tf;
4104 unsigned int err_mask;
4105
4106 /* set up set-features taskfile */
4107 DPRINTK("set features - SATA features\n");
4108
4109 ata_tf_init(dev, &tf);
4110 tf.command = ATA_CMD_SET_FEATURES;
4111 tf.feature = enable;
4112 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4113 tf.protocol = ATA_PROT_NODATA;
4114 tf.nsect = SATA_AN;
4115
4116 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
1da177e4 4117
83206a29
TH
4118 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4119 return err_mask;
1da177e4
LT
4120}
4121
8bf62ece
AL
4122/**
4123 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4124 * @dev: Device to which command will be sent
e2a7f77a
RD
4125 * @heads: Number of heads (taskfile parameter)
4126 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4127 *
4128 * LOCKING:
6aff8f1f
TH
4129 * Kernel thread context (may sleep)
4130 *
4131 * RETURNS:
4132 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4133 */
3373efd8
TH
4134static unsigned int ata_dev_init_params(struct ata_device *dev,
4135 u16 heads, u16 sectors)
8bf62ece 4136{
a0123703 4137 struct ata_taskfile tf;
6aff8f1f 4138 unsigned int err_mask;
8bf62ece
AL
4139
4140 /* Number of sectors per track 1-255. Number of heads 1-16 */
4141 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4142 return AC_ERR_INVALID;
8bf62ece
AL
4143
4144 /* set up init dev params taskfile */
4145 DPRINTK("init dev params \n");
4146
3373efd8 4147 ata_tf_init(dev, &tf);
a0123703
TH
4148 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4149 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4150 tf.protocol = ATA_PROT_NODATA;
4151 tf.nsect = sectors;
4152 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4153
3373efd8 4154 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0);
18b2466c
AC
4155 /* A clean abort indicates an original or just out of spec drive
4156 and we should continue as we issue the setup based on the
4157 drive reported working geometry */
4158 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4159 err_mask = 0;
8bf62ece 4160
6aff8f1f
TH
4161 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4162 return err_mask;
8bf62ece
AL
4163}
4164
1da177e4 4165/**
0cba632b
JG
4166 * ata_sg_clean - Unmap DMA memory associated with command
4167 * @qc: Command containing DMA memory to be released
4168 *
4169 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4170 *
4171 * LOCKING:
cca3974e 4172 * spin_lock_irqsave(host lock)
1da177e4 4173 */
void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	int dir = qc->dma_dir;
	void *pad_buf = NULL;

	WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
	WARN_ON(sg == NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		WARN_ON(qc->n_elem > 1);

	VPRINTK("unmapping %u sg elements\n", qc->n_elem);

	/* if we padded the buffer out to 32-bit bound, and data
	 * xfer direction is from-device, we must copy from the
	 * pad buffer back into the supplied buffer
	 */
	if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
		pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);

	if (qc->flags & ATA_QCFLAG_SG) {
		/* scatter-gather case: unmap, then undo the pad trim done
		 * by ata_sg_setup() so the caller sees original lengths */
		if (qc->n_elem)
			dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
		/* restore last sg */
		sg[qc->orig_n_elem - 1].length += qc->pad_len;
		if (pad_buf) {
			struct scatterlist *psg = &qc->pad_sgent;
			/* copy received pad bytes back into the last page */
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(addr + psg->offset, pad_buf, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}
	} else {
		/* single-buffer case set up by ata_sg_setup_one() */
		if (qc->n_elem)
			dma_unmap_single(ap->dev,
				sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
				dir);
		/* restore sg */
		sg->length += qc->pad_len;
		if (pad_buf)
			memcpy(qc->buf_virt + sg->length - qc->pad_len,
			       pad_buf, qc->pad_len);
	}

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->__sg = NULL;
}
4222
4223/**
4224 * ata_fill_sg - Fill PCI IDE PRD table
4225 * @qc: Metadata associated with taskfile to be transferred
4226 *
780a87f7
JG
4227 * Fill PCI IDE PRD (scatter-gather) table with segments
4228 * associated with the current disk command.
4229 *
1da177e4 4230 * LOCKING:
cca3974e 4231 * spin_lock_irqsave(host lock)
1da177e4
LT
4232 *
4233 */
4234static void ata_fill_sg(struct ata_queued_cmd *qc)
4235{
1da177e4 4236 struct ata_port *ap = qc->ap;
cedc9a47
JG
4237 struct scatterlist *sg;
4238 unsigned int idx;
1da177e4 4239
a4631474 4240 WARN_ON(qc->__sg == NULL);
f131883e 4241 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4242
4243 idx = 0;
cedc9a47 4244 ata_for_each_sg(sg, qc) {
1da177e4
LT
4245 u32 addr, offset;
4246 u32 sg_len, len;
4247
4248 /* determine if physical DMA addr spans 64K boundary.
4249 * Note h/w doesn't support 64-bit, so we unconditionally
4250 * truncate dma_addr_t to u32.
4251 */
4252 addr = (u32) sg_dma_address(sg);
4253 sg_len = sg_dma_len(sg);
4254
4255 while (sg_len) {
4256 offset = addr & 0xffff;
4257 len = sg_len;
4258 if ((offset + sg_len) > 0x10000)
4259 len = 0x10000 - offset;
4260
4261 ap->prd[idx].addr = cpu_to_le32(addr);
4262 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4263 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4264
4265 idx++;
4266 sg_len -= len;
4267 addr += len;
4268 }
4269 }
4270
4271 if (idx)
4272 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4273}
b9a4197e 4274
d26fc955
AC
4275/**
4276 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4277 * @qc: Metadata associated with taskfile to be transferred
4278 *
4279 * Fill PCI IDE PRD (scatter-gather) table with segments
4280 * associated with the current disk command. Perform the fill
4281 * so that we avoid writing any length 64K records for
4282 * controllers that don't follow the spec.
4283 *
4284 * LOCKING:
4285 * spin_lock_irqsave(host lock)
4286 *
4287 */
static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg;
	unsigned int idx;

	WARN_ON(qc->__sg == NULL);
	WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);

	idx = 0;
	ata_for_each_sg(sg, qc) {
		u32 addr, offset;
		u32 sg_len, len, blen;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			/* split any chunk that would cross a 64K boundary */
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			blen = len & 0xffff;
			ap->prd[idx].addr = cpu_to_le32(addr);
			if (blen == 0) {
			   /* Some PATA chipsets like the CS5530 can't
			      cope with 0x0000 meaning 64K as the spec says */
				/* emit the 64K chunk as two 32K PRD entries;
				 * the second entry's length is written by the
				 * shared flags_len store below */
				ap->prd[idx].flags_len = cpu_to_le32(0x8000);
				blen = 0x8000;
				ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
			}
			ap->prd[idx].flags_len = cpu_to_le32(blen);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
4336
1da177e4
LT
4337/**
4338 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4339 * @qc: Metadata associated with taskfile to check
4340 *
780a87f7
JG
4341 * Allow low-level driver to filter ATA PACKET commands, returning
4342 * a status indicating whether or not it is OK to use DMA for the
4343 * supplied PACKET command.
4344 *
1da177e4 4345 * LOCKING:
cca3974e 4346 * spin_lock_irqsave(host lock)
0cba632b 4347 *
1da177e4
LT
4348 * RETURNS: 0 when ATAPI DMA can be used
4349 * nonzero otherwise
4350 */
4351int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4352{
4353 struct ata_port *ap = qc->ap;
b9a4197e
TH
4354
4355 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4356 * few ATAPI devices choke on such DMA requests.
4357 */
4358 if (unlikely(qc->nbytes & 15))
4359 return 1;
6f23a31d 4360
1da177e4 4361 if (ap->ops->check_atapi_dma)
b9a4197e 4362 return ap->ops->check_atapi_dma(qc);
1da177e4 4363
b9a4197e 4364 return 0;
1da177e4 4365}
b9a4197e 4366
31cc23b3
TH
4367/**
4368 * ata_std_qc_defer - Check whether a qc needs to be deferred
4369 * @qc: ATA command in question
4370 *
4371 * Non-NCQ commands cannot run with any other command, NCQ or
4372 * not. As upper layer only knows the queue depth, we are
4373 * responsible for maintaining exclusion. This function checks
4374 * whether a new command @qc can be issued.
4375 *
4376 * LOCKING:
4377 * spin_lock_irqsave(host lock)
4378 *
4379 * RETURNS:
4380 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4381 */
4382int ata_std_qc_defer(struct ata_queued_cmd *qc)
4383{
4384 struct ata_link *link = qc->dev->link;
4385
4386 if (qc->tf.protocol == ATA_PROT_NCQ) {
4387 if (!ata_tag_valid(link->active_tag))
4388 return 0;
4389 } else {
4390 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4391 return 0;
4392 }
4393
4394 return ATA_DEFER_LINK;
4395}
4396
1da177e4
LT
4397/**
4398 * ata_qc_prep - Prepare taskfile for submission
4399 * @qc: Metadata associated with taskfile to be prepared
4400 *
780a87f7
JG
4401 * Prepare ATA taskfile for submission.
4402 *
1da177e4 4403 * LOCKING:
cca3974e 4404 * spin_lock_irqsave(host lock)
1da177e4
LT
4405 */
4406void ata_qc_prep(struct ata_queued_cmd *qc)
4407{
4408 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4409 return;
4410
4411 ata_fill_sg(qc);
4412}
4413
d26fc955
AC
4414/**
4415 * ata_dumb_qc_prep - Prepare taskfile for submission
4416 * @qc: Metadata associated with taskfile to be prepared
4417 *
4418 * Prepare ATA taskfile for submission.
4419 *
4420 * LOCKING:
4421 * spin_lock_irqsave(host lock)
4422 */
4423void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4424{
4425 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4426 return;
4427
4428 ata_fill_sg_dumb(qc);
4429}
4430
e46834cd
BK
/* no-op ->qc_prep for controllers that need no PRD/scatter-gather setup */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4432
0cba632b
JG
4433/**
4434 * ata_sg_init_one - Associate command with memory buffer
4435 * @qc: Command to be associated
4436 * @buf: Memory buffer
4437 * @buflen: Length of memory buffer, in bytes.
4438 *
4439 * Initialize the data-related elements of queued_cmd @qc
4440 * to point to a single memory buffer, @buf of byte length @buflen.
4441 *
4442 * LOCKING:
cca3974e 4443 * spin_lock_irqsave(host lock)
0cba632b
JG
4444 */
4445
1da177e4
LT
4446void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4447{
1da177e4
LT
4448 qc->flags |= ATA_QCFLAG_SINGLE;
4449
cedc9a47 4450 qc->__sg = &qc->sgent;
1da177e4 4451 qc->n_elem = 1;
cedc9a47 4452 qc->orig_n_elem = 1;
1da177e4 4453 qc->buf_virt = buf;
233277ca 4454 qc->nbytes = buflen;
1da177e4 4455
61c0596c 4456 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4457}
4458
0cba632b
JG
4459/**
4460 * ata_sg_init - Associate command with scatter-gather table.
4461 * @qc: Command to be associated
4462 * @sg: Scatter-gather table.
4463 * @n_elem: Number of elements in s/g table.
4464 *
4465 * Initialize the data-related elements of queued_cmd @qc
4466 * to point to a scatter-gather table @sg, containing @n_elem
4467 * elements.
4468 *
4469 * LOCKING:
cca3974e 4470 * spin_lock_irqsave(host lock)
0cba632b
JG
4471 */
4472
1da177e4
LT
4473void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4474 unsigned int n_elem)
4475{
4476 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4477 qc->__sg = sg;
1da177e4 4478 qc->n_elem = n_elem;
cedc9a47 4479 qc->orig_n_elem = n_elem;
1da177e4
LT
4480}
4481
4482/**
0cba632b
JG
4483 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4484 * @qc: Command with memory buffer to be mapped.
4485 *
4486 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4487 *
4488 * LOCKING:
cca3974e 4489 * spin_lock_irqsave(host lock)
1da177e4
LT
4490 *
4491 * RETURNS:
0cba632b 4492 * Zero on success, negative on error.
1da177e4
LT
4493 */
4494
static int ata_sg_setup_one(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	int dir = qc->dma_dir;
	struct scatterlist *sg = qc->__sg;
	dma_addr_t dma_address;
	int trim_sg = 0;

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = sg->length & 3;
	if (qc->pad_len) {
		/* each tag owns its own ATA_DMA_PAD_SZ slot in ap->pad */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;

		/* only ATAPI transfers may be oddly sized */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/* for writes, pre-copy the trailing bytes into the pad;
		 * for reads, ata_sg_clean() copies them back afterwards */
		if (qc->tf.flags & ATA_TFLAG_WRITE)
			memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
			       qc->pad_len);

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim sg */
		sg->length -= qc->pad_len;
		if (sg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg->length=%u pad_len=%u\n",
			sg->length, qc->pad_len);
	}

	/* buffer fit entirely into the pad: nothing left to DMA-map */
	if (trim_sg) {
		qc->n_elem--;
		goto skip_map;
	}

	dma_address = dma_map_single(ap->dev, qc->buf_virt,
				     sg->length, dir);
	if (dma_mapping_error(dma_address)) {
		/* restore sg */
		sg->length += qc->pad_len;
		return -1;
	}

	sg_dma_address(sg) = dma_address;
	sg_dma_len(sg) = sg->length;

skip_map:
	DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
		qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	return 0;
}
4550
4551/**
0cba632b
JG
4552 * ata_sg_setup - DMA-map the scatter-gather table associated with a command.
4553 * @qc: Command with scatter-gather table to be mapped.
4554 *
4555 * DMA-map the scatter-gather table associated with queued_cmd @qc.
1da177e4
LT
4556 *
4557 * LOCKING:
cca3974e 4558 * spin_lock_irqsave(host lock)
1da177e4
LT
4559 *
4560 * RETURNS:
0cba632b 4561 * Zero on success, negative on error.
1da177e4
LT
4562 *
4563 */
4564
static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = &sg[qc->n_elem - 1];
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* each tag owns its own ATA_DMA_PAD_SZ slot in ap->pad */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI transfers may be oddly sized */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		psg->page = nth_page(lsg->page, offset >> PAGE_SHIFT);
		psg->offset = offset_in_page(offset);

		/* for writes, pre-copy the trailing bytes into the pad */
		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			void *addr = kmap_atomic(psg->page, KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* drop the last sg entirely if the pad consumed all of it */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	/* n_elem may have shrunk: mapping can coalesce entries */
	qc->n_elem = n_elem;

	return 0;
}
4635
0baab86b 4636/**
c893a3ae 4637 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4638 * @buf: Buffer to swap
4639 * @buf_words: Number of 16-bit words in buffer.
4640 *
4641 * Swap halves of 16-bit words if needed to convert from
4642 * little-endian byte order to native cpu byte order, or
4643 * vice-versa.
4644 *
4645 * LOCKING:
6f0ef4fa 4646 * Inherited from caller.
0baab86b 4647 */
1da177e4
LT
4648void swap_buf_le16(u16 *buf, unsigned int buf_words)
4649{
4650#ifdef __BIG_ENDIAN
4651 unsigned int i;
4652
4653 for (i = 0; i < buf_words; i++)
4654 buf[i] = le16_to_cpu(buf[i]);
4655#endif /* __BIG_ENDIAN */
4656}
4657
6ae4cfb5 4658/**
0d5ff566 4659 * ata_data_xfer - Transfer data by PIO
a6b2c5d4 4660 * @adev: device to target
6ae4cfb5
AL
4661 * @buf: data buffer
4662 * @buflen: buffer length
344babaa 4663 * @write_data: read/write
6ae4cfb5
AL
4664 *
4665 * Transfer data from/to the device data register by PIO.
4666 *
4667 * LOCKING:
4668 * Inherited from caller.
6ae4cfb5 4669 */
0d5ff566
TH
4670void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
4671 unsigned int buflen, int write_data)
1da177e4 4672{
9af5c9c9 4673 struct ata_port *ap = adev->link->ap;
6ae4cfb5 4674 unsigned int words = buflen >> 1;
1da177e4 4675
6ae4cfb5 4676 /* Transfer multiple of 2 bytes */
1da177e4 4677 if (write_data)
0d5ff566 4678 iowrite16_rep(ap->ioaddr.data_addr, buf, words);
1da177e4 4679 else
0d5ff566 4680 ioread16_rep(ap->ioaddr.data_addr, buf, words);
6ae4cfb5
AL
4681
4682 /* Transfer trailing 1 byte, if any. */
4683 if (unlikely(buflen & 0x01)) {
4684 u16 align_buf[1] = { 0 };
4685 unsigned char *trailing_buf = buf + buflen - 1;
4686
4687 if (write_data) {
4688 memcpy(align_buf, trailing_buf, 1);
0d5ff566 4689 iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
6ae4cfb5 4690 } else {
0d5ff566 4691 align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
6ae4cfb5
AL
4692 memcpy(trailing_buf, align_buf, 1);
4693 }
4694 }
1da177e4
LT
4695}
4696
75e99585 4697/**
0d5ff566 4698 * ata_data_xfer_noirq - Transfer data by PIO
75e99585
AC
4699 * @adev: device to target
4700 * @buf: data buffer
4701 * @buflen: buffer length
4702 * @write_data: read/write
4703 *
88574551 4704 * Transfer data from/to the device data register by PIO. Do the
75e99585
AC
4705 * transfer with interrupts disabled.
4706 *
4707 * LOCKING:
4708 * Inherited from caller.
4709 */
0d5ff566
TH
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long flags;
	/* block local interrupts around the PIO loop, restore after */
	local_irq_save(flags);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(flags);
}
4718
4719
6ae4cfb5 4720/**
5a5dbd18 4721 * ata_pio_sector - Transfer a sector of data.
6ae4cfb5
AL
4722 * @qc: Command on going
4723 *
5a5dbd18 4724 * Transfer qc->sect_size bytes of data from/to the ATA device.
6ae4cfb5
AL
4725 *
4726 * LOCKING:
4727 * Inherited from caller.
4728 */
4729
1da177e4
LT
static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> HSM moves to final state */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg[qc->cursg].page;
	offset = sg[qc->cursg].offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		/* irqs off while the atomic kmap is live */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	/* advance bookkeeping by one sector */
	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted -> move to the next one */
	if (qc->cursg_ofs == (&sg[qc->cursg])->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}
}
1da177e4 4776
07f6f7d0 4777/**
5a5dbd18 4778 * ata_pio_sectors - Transfer one or many sectors.
07f6f7d0
AL
4779 * @qc: Command on going
4780 *
5a5dbd18 4781 * Transfer one or many sectors of data from/to the
07f6f7d0
AL
4782 * ATA device for the DRQ request.
4783 *
4784 * LOCKING:
4785 * Inherited from caller.
4786 */
1da177e4 4787
07f6f7d0
AL
4788static void ata_pio_sectors(struct ata_queued_cmd *qc)
4789{
4790 if (is_multi_taskfile(&qc->tf)) {
4791 /* READ/WRITE MULTIPLE */
4792 unsigned int nsect;
4793
587005de 4794 WARN_ON(qc->dev->multi_count == 0);
1da177e4 4795
5a5dbd18 4796 nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
726f0785 4797 qc->dev->multi_count);
07f6f7d0
AL
4798 while (nsect--)
4799 ata_pio_sector(qc);
4800 } else
4801 ata_pio_sector(qc);
4cc980b3
AL
4802
4803 ata_altstatus(qc->ap); /* flush */
07f6f7d0
AL
4804}
4805
c71c1857
AL
4806/**
4807 * atapi_send_cdb - Write CDB bytes to hardware
4808 * @ap: Port to which ATAPI device is attached.
4809 * @qc: Taskfile currently active
4810 *
4811 * When device has indicated its readiness to accept
4812 * a CDB, this function is called. Send the CDB.
4813 *
4814 * LOCKING:
4815 * caller.
4816 */
4817
4818static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
4819{
4820 /* send SCSI cdb */
4821 DPRINTK("send cdb\n");
db024d53 4822 WARN_ON(qc->dev->cdb_len < 12);
c71c1857 4823
a6b2c5d4 4824 ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
c71c1857
AL
4825 ata_altstatus(ap); /* flush */
4826
4827 switch (qc->tf.protocol) {
4828 case ATA_PROT_ATAPI:
4829 ap->hsm_task_state = HSM_ST;
4830 break;
4831 case ATA_PROT_ATAPI_NODATA:
4832 ap->hsm_task_state = HSM_ST_LAST;
4833 break;
4834 case ATA_PROT_ATAPI_DMA:
4835 ap->hsm_task_state = HSM_ST_LAST;
4836 /* initiate bmdma */
4837 ap->ops->bmdma_start(qc);
4838 break;
4839 }
1da177e4
LT
4840}
4841
6ae4cfb5
AL
4842/**
4843 * __atapi_pio_bytes - Transfer data from/to the ATAPI device.
4844 * @qc: Command on going
4845 * @bytes: number of bytes
4846 *
4847 * Transfer Transfer data from/to the ATAPI device.
4848 *
4849 * LOCKING:
4850 * Inherited from caller.
4851 *
4852 */
4853
1da177e4
LT
static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct scatterlist *sg = qc->__sg;
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

	/* this chunk completes the command -> HSM moves to final state */
	if (qc->curbytes + bytes >= qc->nbytes)
		ap->hsm_task_state = HSM_ST_LAST;

next_sg:
	if (unlikely(qc->cursg >= qc->n_elem)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 *    - for read case, discard trailing data from the device
		 *    - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int words = bytes >> 1;
		unsigned int i;

		if (words) /* warning if bytes > 1 */
			ata_dev_printk(qc->dev, KERN_WARNING,
				       "%u bytes trailing data\n", bytes);

		/* drain/pad one word at a time through the scratch buffer */
		for (i = 0; i < words; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char*)pad_buf, 2, do_write);

		ap->hsm_task_state = HSM_ST_LAST;
		return;
	}

	sg = &qc->__sg[qc->cursg];

	page = sg->page;
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		/* irqs off while the atomic kmap is live */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no kmap needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	/* advance bookkeeping by the chunk just transferred */
	bytes -= count;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	/* current sg entry exhausted -> move to the next one */
	if (qc->cursg_ofs == sg->length) {
		qc->cursg++;
		qc->cursg_ofs = 0;
	}

	/* more bytes demanded by the device -> keep going */
	if (bytes)
		goto next_sg;
}
4936
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	/* byte count requested by the device, from the LBA mid/high regs */
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	__atapi_pio_bytes(qc, bytes);
	ata_altstatus(ap); /* flush */

	return;

err_out:
	/* bogus ireason from the device; flag as HSM violation for EH */
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
4987
4988/**
c234fb00
AL
4989 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
4990 * @ap: the target ata_port
4991 * @qc: qc on going
1da177e4 4992 *
c234fb00
AL
4993 * RETURNS:
4994 * 1 if ok in workqueue, 0 otherwise.
1da177e4 4995 */
c234fb00
AL
4996
4997static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 4998{
c234fb00
AL
4999 if (qc->tf.flags & ATA_TFLAG_POLLING)
5000 return 1;
1da177e4 5001
c234fb00
AL
5002 if (ap->hsm_task_state == HSM_ST_FIRST) {
5003 if (qc->tf.protocol == ATA_PROT_PIO &&
5004 (qc->tf.flags & ATA_TFLAG_WRITE))
5005 return 1;
1da177e4 5006
c234fb00
AL
5007 if (is_atapi_taskfile(&qc->tf) &&
5008 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5009 return 1;
fe79e683
AL
5010 }
5011
c234fb00
AL
5012 return 0;
5013}
1da177e4 5014
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		/* new-style EH */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable device interrupt before
					 * completing the command
					 */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					/* HSM violation: freeze port so EH
					 * can recover
					 */
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: no port freezing, just complete */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5064
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			ata_port_printk(ap, KERN_WARNING, "DRQ=1 with device "
					"error, dev_stat 0x%X\n", status);
			qc->err_mask |= AC_ERR_HSM;
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) alone with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5298
/* PIO task run from the workqueue: waits out BSY with a short
 * heuristic, then drives the HSM and loops while polling is needed.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic.  This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two.  If not, the drive is probably seeking
	 * or something.  Snooze for a couple msecs, then
	 * chk-status again.  If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: requeue ourselves with a short pause
			 * instead of spinning in the workqueue
			 */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5336
1da177e4
LT
5337/**
5338 * ata_qc_new - Request an available ATA command, for queueing
5339 * @ap: Port associated with device @dev
5340 * @dev: Device from whom we request an available command structure
5341 *
5342 * LOCKING:
0cba632b 5343 * None.
1da177e4
LT
5344 */
5345
5346static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5347{
5348 struct ata_queued_cmd *qc = NULL;
5349 unsigned int i;
5350
e3180499 5351 /* no command while frozen */
b51e9e5d 5352 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5353 return NULL;
5354
2ab7db1f
TH
5355 /* the last tag is reserved for internal command. */
5356 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5357 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5358 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5359 break;
5360 }
5361
5362 if (qc)
5363 qc->tag = i;
5364
5365 return qc;
5366}
5367
5368/**
5369 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5370 * @dev: Device from whom we request an available command structure
5371 *
5372 * LOCKING:
0cba632b 5373 * None.
1da177e4
LT
5374 */
5375
3373efd8 5376struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5377{
9af5c9c9 5378 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5379 struct ata_queued_cmd *qc;
5380
5381 qc = ata_qc_new(ap);
5382 if (qc) {
1da177e4
LT
5383 qc->scsicmd = NULL;
5384 qc->ap = ap;
5385 qc->dev = dev;
1da177e4 5386
2c13b7ce 5387 ata_qc_reinit(qc);
1da177e4
LT
5388 }
5389
5390 return qc;
5391}
5392
1da177e4
LT
5393/**
5394 * ata_qc_free - free unused ata_queued_cmd
5395 * @qc: Command to complete
5396 *
5397 * Designed to free unused ata_queued_cmd object
5398 * in case something prevents using it.
5399 *
5400 * LOCKING:
cca3974e 5401 * spin_lock_irqsave(host lock)
1da177e4
LT
5402 */
5403void ata_qc_free(struct ata_queued_cmd *qc)
5404{
4ba946e9
TH
5405 struct ata_port *ap = qc->ap;
5406 unsigned int tag;
5407
a4631474 5408 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5409
4ba946e9
TH
5410 qc->flags = 0;
5411 tag = qc->tag;
5412 if (likely(ata_tag_valid(tag))) {
4ba946e9 5413 qc->tag = ATA_TAG_POISON;
6cec4a39 5414 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5415 }
1da177e4
LT
5416}
5417
/* Low-level qc completion: unmaps DMA, clears active-tag bookkeeping
 * on the link and port, then invokes the completion callback.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		/* link leaves the active set when its last NCQ tag retires */
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5454
39599a53
TH
5455static void fill_result_tf(struct ata_queued_cmd *qc)
5456{
5457 struct ata_port *ap = qc->ap;
5458
39599a53 5459 qc->result_tf.flags = qc->tf.flags;
4742d54f 5460 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5461}
5462
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc.  libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set.  Old EH does
	 * not synchronize with interrupt handler.  Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		/* new-style EH */
		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			/* internal commands are reported to their issuer
			 * directly, not through EH scheduling
			 */
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	} else {
		/* old-style EH */
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5522
dedaf2b0
TH
5523/**
5524 * ata_qc_complete_multiple - Complete multiple qcs successfully
5525 * @ap: port in question
5526 * @qc_active: new qc_active mask
5527 * @finish_qc: LLDD callback invoked before completing a qc
5528 *
5529 * Complete in-flight commands. This functions is meant to be
5530 * called from low-level driver's interrupt routine to complete
5531 * requests normally. ap->qc_active and @qc_active is compared
5532 * and commands are completed accordingly.
5533 *
5534 * LOCKING:
cca3974e 5535 * spin_lock_irqsave(host lock)
dedaf2b0
TH
5536 *
5537 * RETURNS:
5538 * Number of completed commands on success, -errno otherwise.
5539 */
5540int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
5541 void (*finish_qc)(struct ata_queued_cmd *))
5542{
5543 int nr_done = 0;
5544 u32 done_mask;
5545 int i;
5546
5547 done_mask = ap->qc_active ^ qc_active;
5548
5549 if (unlikely(done_mask & qc_active)) {
5550 ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
5551 "(%08x->%08x)\n", ap->qc_active, qc_active);
5552 return -EINVAL;
5553 }
5554
5555 for (i = 0; i < ATA_MAX_QUEUE; i++) {
5556 struct ata_queued_cmd *qc;
5557
5558 if (!(done_mask & (1 << i)))
5559 continue;
5560
5561 if ((qc = ata_qc_from_tag(ap, i))) {
5562 if (finish_qc)
5563 finish_qc(qc);
5564 ata_qc_complete(qc);
5565 nr_done++;
5566 }
5567 }
5568
5569 return nr_done;
5570}
5571
1da177e4
LT
5572static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
5573{
5574 struct ata_port *ap = qc->ap;
5575
5576 switch (qc->tf.protocol) {
3dc1d881 5577 case ATA_PROT_NCQ:
1da177e4
LT
5578 case ATA_PROT_DMA:
5579 case ATA_PROT_ATAPI_DMA:
5580 return 1;
5581
5582 case ATA_PROT_ATAPI:
5583 case ATA_PROT_PIO:
1da177e4
LT
5584 if (ap->flags & ATA_FLAG_PIO_DMA)
5585 return 1;
5586
5587 /* fall through */
5588
5589 default:
5590 return 0;
5591 }
5592
5593 /* never reached */
5594}
5595
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (qc->tf.protocol == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));

		/* first NCQ tag on this link makes it active */
		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	if (ata_should_dma_map(qc)) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	/* S/G mapping failed; complete the qc with a system error */
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5660
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->ops->bmdma_start(qc);	    /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	    /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
5792
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr (struct ata_port *ap,
				   struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.
		 * No need to check is_atapi_taskfile(&qc->tf) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transferring data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA command failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
5894
5895/**
5896 * ata_interrupt - Default ATA host interrupt handler
0cba632b 5897 * @irq: irq line (unused)
cca3974e 5898 * @dev_instance: pointer to our ata_host information structure
1da177e4 5899 *
0cba632b
JG
5900 * Default interrupt handler for PCI IDE devices. Calls
5901 * ata_host_intr() for each port that is not disabled.
5902 *
1da177e4 5903 * LOCKING:
cca3974e 5904 * Obtains host lock during operation.
1da177e4
LT
5905 *
5906 * RETURNS:
0cba632b 5907 * IRQ_NONE or IRQ_HANDLED.
1da177e4
LT
5908 */
5909
7d12e780 5910irqreturn_t ata_interrupt (int irq, void *dev_instance)
1da177e4 5911{
cca3974e 5912 struct ata_host *host = dev_instance;
1da177e4
LT
5913 unsigned int i;
5914 unsigned int handled = 0;
5915 unsigned long flags;
5916
5917 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
cca3974e 5918 spin_lock_irqsave(&host->lock, flags);
1da177e4 5919
cca3974e 5920 for (i = 0; i < host->n_ports; i++) {
1da177e4
LT
5921 struct ata_port *ap;
5922
cca3974e 5923 ap = host->ports[i];
c1389503 5924 if (ap &&
029f5468 5925 !(ap->flags & ATA_FLAG_DISABLED)) {
1da177e4
LT
5926 struct ata_queued_cmd *qc;
5927
9af5c9c9 5928 qc = ata_qc_from_tag(ap, ap->link.active_tag);
312f7da2 5929 if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
21b1ed74 5930 (qc->flags & ATA_QCFLAG_ACTIVE))
1da177e4
LT
5931 handled |= ata_host_intr(ap, qc);
5932 }
5933 }
5934
cca3974e 5935 spin_unlock_irqrestore(&host->lock, flags);
1da177e4
LT
5936
5937 return IRQ_RETVAL(handled);
5938}
5939
34bf2170
TH
5940/**
5941 * sata_scr_valid - test whether SCRs are accessible
936fd732 5942 * @link: ATA link to test SCR accessibility for
34bf2170 5943 *
936fd732 5944 * Test whether SCRs are accessible for @link.
34bf2170
TH
5945 *
5946 * LOCKING:
5947 * None.
5948 *
5949 * RETURNS:
5950 * 1 if SCRs are accessible, 0 otherwise.
5951 */
936fd732 5952int sata_scr_valid(struct ata_link *link)
34bf2170 5953{
936fd732
TH
5954 struct ata_port *ap = link->ap;
5955
a16abc0b 5956 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
5957}
5958
5959/**
5960 * sata_scr_read - read SCR register of the specified port
936fd732 5961 * @link: ATA link to read SCR for
34bf2170
TH
5962 * @reg: SCR to read
5963 * @val: Place to store read value
5964 *
936fd732 5965 * Read SCR register @reg of @link into *@val. This function is
34bf2170
TH
5966 * guaranteed to succeed if the cable type of the port is SATA
5967 * and the port implements ->scr_read.
5968 *
5969 * LOCKING:
5970 * None.
5971 *
5972 * RETURNS:
5973 * 0 on success, negative errno on failure.
5974 */
936fd732 5975int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 5976{
936fd732
TH
5977 struct ata_port *ap = link->ap;
5978
5979 if (sata_scr_valid(link))
da3dbb17 5980 return ap->ops->scr_read(ap, reg, val);
34bf2170
TH
5981 return -EOPNOTSUPP;
5982}
5983
5984/**
5985 * sata_scr_write - write SCR register of the specified port
936fd732 5986 * @link: ATA link to write SCR for
34bf2170
TH
5987 * @reg: SCR to write
5988 * @val: value to write
5989 *
936fd732 5990 * Write @val to SCR register @reg of @link. This function is
34bf2170
TH
5991 * guaranteed to succeed if the cable type of the port is SATA
5992 * and the port implements ->scr_read.
5993 *
5994 * LOCKING:
5995 * None.
5996 *
5997 * RETURNS:
5998 * 0 on success, negative errno on failure.
5999 */
936fd732 6000int sata_scr_write(struct ata_link *link, int reg, u32 val)
34bf2170 6001{
936fd732
TH
6002 struct ata_port *ap = link->ap;
6003
6004 if (sata_scr_valid(link))
da3dbb17 6005 return ap->ops->scr_write(ap, reg, val);
34bf2170
TH
6006 return -EOPNOTSUPP;
6007}
6008
6009/**
6010 * sata_scr_write_flush - write SCR register of the specified port and flush
936fd732 6011 * @link: ATA link to write SCR for
34bf2170
TH
6012 * @reg: SCR to write
6013 * @val: value to write
6014 *
6015 * This function is identical to sata_scr_write() except that this
6016 * function performs flush after writing to the register.
6017 *
6018 * LOCKING:
6019 * None.
6020 *
6021 * RETURNS:
6022 * 0 on success, negative errno on failure.
6023 */
936fd732 6024int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
34bf2170 6025{
936fd732 6026 struct ata_port *ap = link->ap;
da3dbb17
TH
6027 int rc;
6028
936fd732 6029 if (sata_scr_valid(link)) {
da3dbb17
TH
6030 rc = ap->ops->scr_write(ap, reg, val);
6031 if (rc == 0)
6032 rc = ap->ops->scr_read(ap, reg, &val);
6033 return rc;
34bf2170
TH
6034 }
6035 return -EOPNOTSUPP;
6036}
6037
6038/**
936fd732
TH
6039 * ata_link_online - test whether the given link is online
6040 * @link: ATA link to test
34bf2170 6041 *
936fd732
TH
6042 * Test whether @link is online. Note that this function returns
6043 * 0 if online status of @link cannot be obtained, so
6044 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6045 *
6046 * LOCKING:
6047 * None.
6048 *
6049 * RETURNS:
6050 * 1 if the port online status is available and online.
6051 */
936fd732 6052int ata_link_online(struct ata_link *link)
34bf2170
TH
6053{
6054 u32 sstatus;
6055
936fd732
TH
6056 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6057 (sstatus & 0xf) == 0x3)
34bf2170
TH
6058 return 1;
6059 return 0;
6060}
6061
6062/**
936fd732
TH
6063 * ata_link_offline - test whether the given link is offline
6064 * @link: ATA link to test
34bf2170 6065 *
936fd732
TH
6066 * Test whether @link is offline. Note that this function
6067 * returns 0 if offline status of @link cannot be obtained, so
6068 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6069 *
6070 * LOCKING:
6071 * None.
6072 *
6073 * RETURNS:
6074 * 1 if the port offline status is available and offline.
6075 */
936fd732 6076int ata_link_offline(struct ata_link *link)
34bf2170
TH
6077{
6078 u32 sstatus;
6079
936fd732
TH
6080 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6081 (sstatus & 0xf) != 0x3)
34bf2170
TH
6082 return 1;
6083 return 0;
6084}
0baab86b 6085
77b08fb5 6086int ata_flush_cache(struct ata_device *dev)
9b847548 6087{
977e6b9f 6088 unsigned int err_mask;
9b847548
JA
6089 u8 cmd;
6090
6091 if (!ata_try_flush_cache(dev))
6092 return 0;
6093
6fc49adb 6094 if (dev->flags & ATA_DFLAG_FLUSH_EXT)
9b847548
JA
6095 cmd = ATA_CMD_FLUSH_EXT;
6096 else
6097 cmd = ATA_CMD_FLUSH;
6098
4f34337b
AC
6099 /* This is wrong. On a failed flush we get back the LBA of the lost
6100 sector and we should (assuming it wasn't aborted as unknown) issue
6101 a further flush command to continue the writeback until it
6102 does not error */
977e6b9f
TH
6103 err_mask = ata_do_simple_cmd(dev, cmd);
6104 if (err_mask) {
6105 ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
6106 return -EIO;
6107 }
6108
6109 return 0;
9b847548
JA
6110}
6111
6ffa01d8 6112#ifdef CONFIG_PM
cca3974e
JG
6113static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
6114 unsigned int action, unsigned int ehi_flags,
6115 int wait)
500530f6
TH
6116{
6117 unsigned long flags;
6118 int i, rc;
6119
cca3974e
JG
6120 for (i = 0; i < host->n_ports; i++) {
6121 struct ata_port *ap = host->ports[i];
e3667ebf 6122 struct ata_link *link;
500530f6
TH
6123
6124 /* Previous resume operation might still be in
6125 * progress. Wait for PM_PENDING to clear.
6126 */
6127 if (ap->pflags & ATA_PFLAG_PM_PENDING) {
6128 ata_port_wait_eh(ap);
6129 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6130 }
6131
6132 /* request PM ops to EH */
6133 spin_lock_irqsave(ap->lock, flags);
6134
6135 ap->pm_mesg = mesg;
6136 if (wait) {
6137 rc = 0;
6138 ap->pm_result = &rc;
6139 }
6140
6141 ap->pflags |= ATA_PFLAG_PM_PENDING;
e3667ebf
TH
6142 __ata_port_for_each_link(link, ap) {
6143 link->eh_info.action |= action;
6144 link->eh_info.flags |= ehi_flags;
6145 }
500530f6
TH
6146
6147 ata_port_schedule_eh(ap);
6148
6149 spin_unlock_irqrestore(ap->lock, flags);
6150
6151 /* wait and check result */
6152 if (wait) {
6153 ata_port_wait_eh(ap);
6154 WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
6155 if (rc)
6156 return rc;
6157 }
6158 }
6159
6160 return 0;
6161}
6162
6163/**
cca3974e
JG
6164 * ata_host_suspend - suspend host
6165 * @host: host to suspend
500530f6
TH
6166 * @mesg: PM message
6167 *
cca3974e 6168 * Suspend @host. Actual operation is performed by EH. This
500530f6
TH
6169 * function requests EH to perform PM operations and waits for EH
6170 * to finish.
6171 *
6172 * LOCKING:
6173 * Kernel thread context (may sleep).
6174 *
6175 * RETURNS:
6176 * 0 on success, -errno on failure.
6177 */
cca3974e 6178int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
500530f6 6179{
9666f400 6180 int rc;
500530f6 6181
cca3974e 6182 rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
9666f400
TH
6183 if (rc == 0)
6184 host->dev->power.power_state = mesg;
500530f6
TH
6185 return rc;
6186}
6187
6188/**
cca3974e
JG
6189 * ata_host_resume - resume host
6190 * @host: host to resume
500530f6 6191 *
cca3974e 6192 * Resume @host. Actual operation is performed by EH. This
500530f6
TH
6193 * function requests EH to perform PM operations and returns.
6194 * Note that all resume operations are performed parallely.
6195 *
6196 * LOCKING:
6197 * Kernel thread context (may sleep).
6198 */
cca3974e 6199void ata_host_resume(struct ata_host *host)
500530f6 6200{
cca3974e
JG
6201 ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
6202 ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
6203 host->dev->power.power_state = PMSG_ON;
500530f6 6204}
6ffa01d8 6205#endif
500530f6 6206
c893a3ae
RD
6207/**
6208 * ata_port_start - Set port up for dma.
6209 * @ap: Port to initialize
6210 *
6211 * Called just after data structures for each port are
6212 * initialized. Allocates space for PRD table.
6213 *
6214 * May be used as the port_start() entry in ata_port_operations.
6215 *
6216 * LOCKING:
6217 * Inherited from caller.
6218 */
f0d36efd 6219int ata_port_start(struct ata_port *ap)
1da177e4 6220{
2f1f610b 6221 struct device *dev = ap->dev;
6037d6bb 6222 int rc;
1da177e4 6223
f0d36efd
TH
6224 ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
6225 GFP_KERNEL);
1da177e4
LT
6226 if (!ap->prd)
6227 return -ENOMEM;
6228
6037d6bb 6229 rc = ata_pad_alloc(ap, dev);
f0d36efd 6230 if (rc)
6037d6bb 6231 return rc;
1da177e4 6232
f0d36efd
TH
6233 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
6234 (unsigned long long)ap->prd_dma);
1da177e4
LT
6235 return 0;
6236}
6237
3ef3b43d
TH
6238/**
6239 * ata_dev_init - Initialize an ata_device structure
6240 * @dev: Device structure to initialize
6241 *
6242 * Initialize @dev in preparation for probing.
6243 *
6244 * LOCKING:
6245 * Inherited from caller.
6246 */
6247void ata_dev_init(struct ata_device *dev)
6248{
9af5c9c9
TH
6249 struct ata_link *link = dev->link;
6250 struct ata_port *ap = link->ap;
72fa4b74
TH
6251 unsigned long flags;
6252
5a04bf4b 6253 /* SATA spd limit is bound to the first device */
9af5c9c9
TH
6254 link->sata_spd_limit = link->hw_sata_spd_limit;
6255 link->sata_spd = 0;
5a04bf4b 6256
72fa4b74
TH
6257 /* High bits of dev->flags are used to record warm plug
6258 * requests which occur asynchronously. Synchronize using
cca3974e 6259 * host lock.
72fa4b74 6260 */
ba6a1308 6261 spin_lock_irqsave(ap->lock, flags);
72fa4b74 6262 dev->flags &= ~ATA_DFLAG_INIT_MASK;
3dcc323f 6263 dev->horkage = 0;
ba6a1308 6264 spin_unlock_irqrestore(ap->lock, flags);
3ef3b43d 6265
72fa4b74
TH
6266 memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
6267 sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
3ef3b43d
TH
6268 dev->pio_mask = UINT_MAX;
6269 dev->mwdma_mask = UINT_MAX;
6270 dev->udma_mask = UINT_MAX;
6271}
6272
4fb37a25
TH
6273/**
6274 * ata_link_init - Initialize an ata_link structure
6275 * @ap: ATA port link is attached to
6276 * @link: Link structure to initialize
8989805d 6277 * @pmp: Port multiplier port number
4fb37a25
TH
6278 *
6279 * Initialize @link.
6280 *
6281 * LOCKING:
6282 * Kernel thread context (may sleep)
6283 */
fb7fd614 6284void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
4fb37a25
TH
6285{
6286 int i;
6287
6288 /* clear everything except for devices */
6289 memset(link, 0, offsetof(struct ata_link, device[0]));
6290
6291 link->ap = ap;
8989805d 6292 link->pmp = pmp;
4fb37a25
TH
6293 link->active_tag = ATA_TAG_POISON;
6294 link->hw_sata_spd_limit = UINT_MAX;
6295
6296 /* can't use iterator, ap isn't initialized yet */
6297 for (i = 0; i < ATA_MAX_DEVICES; i++) {
6298 struct ata_device *dev = &link->device[i];
6299
6300 dev->link = link;
6301 dev->devno = dev - link->device;
6302 ata_dev_init(dev);
6303 }
6304}
6305
6306/**
6307 * sata_link_init_spd - Initialize link->sata_spd_limit
6308 * @link: Link to configure sata_spd_limit for
6309 *
6310 * Initialize @link->[hw_]sata_spd_limit to the currently
6311 * configured value.
6312 *
6313 * LOCKING:
6314 * Kernel thread context (may sleep).
6315 *
6316 * RETURNS:
6317 * 0 on success, -errno on failure.
6318 */
fb7fd614 6319int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6320{
6321 u32 scontrol, spd;
6322 int rc;
6323
6324 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6325 if (rc)
6326 return rc;
6327
6328 spd = (scontrol >> 4) & 0xf;
6329 if (spd)
6330 link->hw_sata_spd_limit &= (1 << spd) - 1;
6331
6332 link->sata_spd_limit = link->hw_sata_spd_limit;
6333
6334 return 0;
6335}
6336
1da177e4 6337/**
f3187195
TH
6338 * ata_port_alloc - allocate and initialize basic ATA port resources
6339 * @host: ATA host this allocated port belongs to
1da177e4 6340 *
f3187195
TH
6341 * Allocate and initialize basic ATA port resources.
6342 *
6343 * RETURNS:
6344 * Allocate ATA port on success, NULL on failure.
0cba632b 6345 *
1da177e4 6346 * LOCKING:
f3187195 6347 * Inherited from calling layer (may sleep).
1da177e4 6348 */
f3187195 6349struct ata_port *ata_port_alloc(struct ata_host *host)
1da177e4 6350{
f3187195 6351 struct ata_port *ap;
1da177e4 6352
f3187195
TH
6353 DPRINTK("ENTER\n");
6354
6355 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
6356 if (!ap)
6357 return NULL;
6358
f4d6d004 6359 ap->pflags |= ATA_PFLAG_INITIALIZING;
cca3974e 6360 ap->lock = &host->lock;
198e0fed 6361 ap->flags = ATA_FLAG_DISABLED;
f3187195 6362 ap->print_id = -1;
1da177e4 6363 ap->ctl = ATA_DEVCTL_OBS;
cca3974e 6364 ap->host = host;
f3187195 6365 ap->dev = host->dev;
1da177e4 6366 ap->last_ctl = 0xFF;
bd5d825c
BP
6367
6368#if defined(ATA_VERBOSE_DEBUG)
6369 /* turn on all debugging levels */
6370 ap->msg_enable = 0x00FF;
6371#elif defined(ATA_DEBUG)
6372 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
88574551 6373#else
0dd4b21f 6374 ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
bd5d825c 6375#endif
1da177e4 6376
65f27f38
DH
6377 INIT_DELAYED_WORK(&ap->port_task, NULL);
6378 INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
6379 INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
a72ec4ce 6380 INIT_LIST_HEAD(&ap->eh_done_q);
c6cf9e99 6381 init_waitqueue_head(&ap->eh_wait_q);
5ddf24c5
TH
6382 init_timer_deferrable(&ap->fastdrain_timer);
6383 ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
6384 ap->fastdrain_timer.data = (unsigned long)ap;
1da177e4 6385
838df628 6386 ap->cbl = ATA_CBL_NONE;
838df628 6387
8989805d 6388 ata_link_init(ap, &ap->link, 0);
1da177e4
LT
6389
6390#ifdef ATA_IRQ_TRAP
6391 ap->stats.unhandled_irq = 1;
6392 ap->stats.idle_irq = 1;
6393#endif
1da177e4 6394 return ap;
1da177e4
LT
6395}
6396
f0d36efd
TH
6397static void ata_host_release(struct device *gendev, void *res)
6398{
6399 struct ata_host *host = dev_get_drvdata(gendev);
6400 int i;
6401
6402 for (i = 0; i < host->n_ports; i++) {
6403 struct ata_port *ap = host->ports[i];
6404
ecef7253
TH
6405 if (!ap)
6406 continue;
6407
6408 if ((host->flags & ATA_HOST_STARTED) && ap->ops->port_stop)
f0d36efd 6409 ap->ops->port_stop(ap);
f0d36efd
TH
6410 }
6411
ecef7253 6412 if ((host->flags & ATA_HOST_STARTED) && host->ops->host_stop)
f0d36efd 6413 host->ops->host_stop(host);
1aa56cca 6414
1aa506e4
TH
6415 for (i = 0; i < host->n_ports; i++) {
6416 struct ata_port *ap = host->ports[i];
6417
4911487a
TH
6418 if (!ap)
6419 continue;
6420
6421 if (ap->scsi_host)
1aa506e4
TH
6422 scsi_host_put(ap->scsi_host);
6423
4911487a 6424 kfree(ap);
1aa506e4
TH
6425 host->ports[i] = NULL;
6426 }
6427
1aa56cca 6428 dev_set_drvdata(gendev, NULL);
f0d36efd
TH
6429}
6430
f3187195
TH
6431/**
6432 * ata_host_alloc - allocate and init basic ATA host resources
6433 * @dev: generic device this host is associated with
6434 * @max_ports: maximum number of ATA ports associated with this host
6435 *
6436 * Allocate and initialize basic ATA host resources. LLD calls
6437 * this function to allocate a host, initializes it fully and
6438 * attaches it using ata_host_register().
6439 *
6440 * @max_ports ports are allocated and host->n_ports is
6441 * initialized to @max_ports. The caller is allowed to decrease
6442 * host->n_ports before calling ata_host_register(). The unused
6443 * ports will be automatically freed on registration.
6444 *
6445 * RETURNS:
6446 * Allocate ATA host on success, NULL on failure.
6447 *
6448 * LOCKING:
6449 * Inherited from calling layer (may sleep).
6450 */
6451struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
6452{
6453 struct ata_host *host;
6454 size_t sz;
6455 int i;
6456
6457 DPRINTK("ENTER\n");
6458
6459 if (!devres_open_group(dev, NULL, GFP_KERNEL))
6460 return NULL;
6461
6462 /* alloc a container for our list of ATA ports (buses) */
6463 sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
6464 /* alloc a container for our list of ATA ports (buses) */
6465 host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
6466 if (!host)
6467 goto err_out;
6468
6469 devres_add(dev, host);
6470 dev_set_drvdata(dev, host);
6471
6472 spin_lock_init(&host->lock);
6473 host->dev = dev;
6474 host->n_ports = max_ports;
6475
6476 /* allocate ports bound to this host */
6477 for (i = 0; i < max_ports; i++) {
6478 struct ata_port *ap;
6479
6480 ap = ata_port_alloc(host);
6481 if (!ap)
6482 goto err_out;
6483
6484 ap->port_no = i;
6485 host->ports[i] = ap;
6486 }
6487
6488 devres_remove_group(dev, NULL);
6489 return host;
6490
6491 err_out:
6492 devres_release_group(dev, NULL);
6493 return NULL;
6494}
6495
f5cda257
TH
6496/**
6497 * ata_host_alloc_pinfo - alloc host and init with port_info array
6498 * @dev: generic device this host is associated with
6499 * @ppi: array of ATA port_info to initialize host with
6500 * @n_ports: number of ATA ports attached to this host
6501 *
6502 * Allocate ATA host and initialize with info from @ppi. If NULL
6503 * terminated, @ppi may contain fewer entries than @n_ports. The
6504 * last entry will be used for the remaining ports.
6505 *
6506 * RETURNS:
6507 * Allocate ATA host on success, NULL on failure.
6508 *
6509 * LOCKING:
6510 * Inherited from calling layer (may sleep).
6511 */
6512struct ata_host *ata_host_alloc_pinfo(struct device *dev,
6513 const struct ata_port_info * const * ppi,
6514 int n_ports)
6515{
6516 const struct ata_port_info *pi;
6517 struct ata_host *host;
6518 int i, j;
6519
6520 host = ata_host_alloc(dev, n_ports);
6521 if (!host)
6522 return NULL;
6523
6524 for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
6525 struct ata_port *ap = host->ports[i];
6526
6527 if (ppi[j])
6528 pi = ppi[j++];
6529
6530 ap->pio_mask = pi->pio_mask;
6531 ap->mwdma_mask = pi->mwdma_mask;
6532 ap->udma_mask = pi->udma_mask;
6533 ap->flags |= pi->flags;
0c88758b 6534 ap->link.flags |= pi->link_flags;
f5cda257
TH
6535 ap->ops = pi->port_ops;
6536
6537 if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
6538 host->ops = pi->port_ops;
6539 if (!host->private_data && pi->private_data)
6540 host->private_data = pi->private_data;
6541 }
6542
6543 return host;
6544}
6545
ecef7253
TH
6546/**
6547 * ata_host_start - start and freeze ports of an ATA host
6548 * @host: ATA host to start ports for
6549 *
6550 * Start and then freeze ports of @host. Started status is
6551 * recorded in host->flags, so this function can be called
6552 * multiple times. Ports are guaranteed to get started only
f3187195
TH
6553 * once. If host->ops isn't initialized yet, its set to the
6554 * first non-dummy port ops.
ecef7253
TH
6555 *
6556 * LOCKING:
6557 * Inherited from calling layer (may sleep).
6558 *
6559 * RETURNS:
6560 * 0 if all ports are started successfully, -errno otherwise.
6561 */
6562int ata_host_start(struct ata_host *host)
6563{
6564 int i, rc;
6565
6566 if (host->flags & ATA_HOST_STARTED)
6567 return 0;
6568
6569 for (i = 0; i < host->n_ports; i++) {
6570 struct ata_port *ap = host->ports[i];
6571
f3187195
TH
6572 if (!host->ops && !ata_port_is_dummy(ap))
6573 host->ops = ap->ops;
6574
ecef7253
TH
6575 if (ap->ops->port_start) {
6576 rc = ap->ops->port_start(ap);
6577 if (rc) {
6578 ata_port_printk(ap, KERN_ERR, "failed to "
6579 "start port (errno=%d)\n", rc);
6580 goto err_out;
6581 }
6582 }
6583
6584 ata_eh_freeze_port(ap);
6585 }
6586
6587 host->flags |= ATA_HOST_STARTED;
6588 return 0;
6589
6590 err_out:
6591 while (--i >= 0) {
6592 struct ata_port *ap = host->ports[i];
6593
6594 if (ap->ops->port_stop)
6595 ap->ops->port_stop(ap);
6596 }
6597 return rc;
6598}
6599
b03732f0 6600/**
cca3974e
JG
6601 * ata_sas_host_init - Initialize a host struct
6602 * @host: host to initialize
6603 * @dev: device host is attached to
6604 * @flags: host flags
6605 * @ops: port_ops
b03732f0
BK
6606 *
6607 * LOCKING:
6608 * PCI/etc. bus probe sem.
6609 *
6610 */
f3187195 6611/* KILLME - the only user left is ipr */
cca3974e
JG
6612void ata_host_init(struct ata_host *host, struct device *dev,
6613 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6614{
cca3974e
JG
6615 spin_lock_init(&host->lock);
6616 host->dev = dev;
6617 host->flags = flags;
6618 host->ops = ops;
b03732f0
BK
6619}
6620
f3187195
TH
6621/**
6622 * ata_host_register - register initialized ATA host
6623 * @host: ATA host to register
6624 * @sht: template for SCSI host
6625 *
6626 * Register initialized ATA host. @host is allocated using
6627 * ata_host_alloc() and fully initialized by LLD. This function
6628 * starts ports, registers @host with ATA and SCSI layers and
6629 * probe registered devices.
6630 *
6631 * LOCKING:
6632 * Inherited from calling layer (may sleep).
6633 *
6634 * RETURNS:
6635 * 0 on success, -errno otherwise.
6636 */
6637int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
6638{
6639 int i, rc;
6640
6641 /* host must have been started */
6642 if (!(host->flags & ATA_HOST_STARTED)) {
6643 dev_printk(KERN_ERR, host->dev,
6644 "BUG: trying to register unstarted host\n");
6645 WARN_ON(1);
6646 return -EINVAL;
6647 }
6648
6649 /* Blow away unused ports. This happens when LLD can't
6650 * determine the exact number of ports to allocate at
6651 * allocation time.
6652 */
6653 for (i = host->n_ports; host->ports[i]; i++)
6654 kfree(host->ports[i]);
6655
6656 /* give ports names and add SCSI hosts */
6657 for (i = 0; i < host->n_ports; i++)
6658 host->ports[i]->print_id = ata_print_id++;
6659
6660 rc = ata_scsi_add_hosts(host, sht);
6661 if (rc)
6662 return rc;
6663
fafbae87
TH
6664 /* associate with ACPI nodes */
6665 ata_acpi_associate(host);
6666
f3187195
TH
6667 /* set cable, sata_spd_limit and report */
6668 for (i = 0; i < host->n_ports; i++) {
6669 struct ata_port *ap = host->ports[i];
f3187195
TH
6670 unsigned long xfer_mask;
6671
6672 /* set SATA cable type if still unset */
6673 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
6674 ap->cbl = ATA_CBL_SATA;
6675
6676 /* init sata_spd_limit to the current value */
4fb37a25 6677 sata_link_init_spd(&ap->link);
f3187195 6678
cbcdd875 6679 /* print per-port info to dmesg */
f3187195
TH
6680 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
6681 ap->udma_mask);
6682
f3187195 6683 if (!ata_port_is_dummy(ap))
cbcdd875
TH
6684 ata_port_printk(ap, KERN_INFO,
6685 "%cATA max %s %s\n",
a16abc0b 6686 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 6687 ata_mode_string(xfer_mask),
cbcdd875 6688 ap->link.eh_info.desc);
f3187195
TH
6689 else
6690 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
6691 }
6692
6693 /* perform each probe synchronously */
6694 DPRINTK("probe begin\n");
6695 for (i = 0; i < host->n_ports; i++) {
6696 struct ata_port *ap = host->ports[i];
6697 int rc;
6698
6699 /* probe */
6700 if (ap->ops->error_handler) {
9af5c9c9 6701 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
6702 unsigned long flags;
6703
6704 ata_port_probe(ap);
6705
6706 /* kick EH for boot probing */
6707 spin_lock_irqsave(ap->lock, flags);
6708
f58229f8
TH
6709 ehi->probe_mask =
6710 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
6711 ehi->action |= ATA_EH_SOFTRESET;
6712 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
6713
f4d6d004 6714 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
6715 ap->pflags |= ATA_PFLAG_LOADING;
6716 ata_port_schedule_eh(ap);
6717
6718 spin_unlock_irqrestore(ap->lock, flags);
6719
6720 /* wait for EH to finish */
6721 ata_port_wait_eh(ap);
6722 } else {
6723 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
6724 rc = ata_bus_probe(ap);
6725 DPRINTK("ata%u: bus probe end\n", ap->print_id);
6726
6727 if (rc) {
6728 /* FIXME: do something useful here?
6729 * Current libata behavior will
6730 * tear down everything when
6731 * the module is removed
6732 * or the h/w is unplugged.
6733 */
6734 }
6735 }
6736 }
6737
6738 /* probes are done, now scan each port's disk(s) */
6739 DPRINTK("host probe begin\n");
6740 for (i = 0; i < host->n_ports; i++) {
6741 struct ata_port *ap = host->ports[i];
6742
1ae46317 6743 ata_scsi_scan_host(ap, 1);
f3187195
TH
6744 }
6745
6746 return 0;
6747}
6748
f5cda257
TH
6749/**
6750 * ata_host_activate - start host, request IRQ and register it
6751 * @host: target ATA host
6752 * @irq: IRQ to request
6753 * @irq_handler: irq_handler used when requesting IRQ
6754 * @irq_flags: irq_flags used when requesting IRQ
6755 * @sht: scsi_host_template to use when registering the host
6756 *
6757 * After allocating an ATA host and initializing it, most libata
6758 * LLDs perform three steps to activate the host - start host,
6759 * request IRQ and register it. This helper takes necessasry
6760 * arguments and performs the three steps in one go.
6761 *
6762 * LOCKING:
6763 * Inherited from calling layer (may sleep).
6764 *
6765 * RETURNS:
6766 * 0 on success, -errno otherwise.
6767 */
6768int ata_host_activate(struct ata_host *host, int irq,
6769 irq_handler_t irq_handler, unsigned long irq_flags,
6770 struct scsi_host_template *sht)
6771{
cbcdd875 6772 int i, rc;
f5cda257
TH
6773
6774 rc = ata_host_start(host);
6775 if (rc)
6776 return rc;
6777
6778 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
6779 dev_driver_string(host->dev), host);
6780 if (rc)
6781 return rc;
6782
cbcdd875
TH
6783 for (i = 0; i < host->n_ports; i++)
6784 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 6785
f5cda257
TH
6786 rc = ata_host_register(host, sht);
6787 /* if failed, just free the IRQ and leave ports alone */
6788 if (rc)
6789 devm_free_irq(host->dev, irq, host);
6790
6791 return rc;
6792}
6793
720ba126
TH
6794/**
6795 * ata_port_detach - Detach ATA port in prepration of device removal
6796 * @ap: ATA port to be detached
6797 *
6798 * Detach all ATA devices and the associated SCSI devices of @ap;
6799 * then, remove the associated SCSI host. @ap is guaranteed to
6800 * be quiescent on return from this function.
6801 *
6802 * LOCKING:
6803 * Kernel thread context (may sleep).
6804 */
6805void ata_port_detach(struct ata_port *ap)
6806{
6807 unsigned long flags;
41bda9c9 6808 struct ata_link *link;
f58229f8 6809 struct ata_device *dev;
720ba126
TH
6810
6811 if (!ap->ops->error_handler)
c3cf30a9 6812 goto skip_eh;
720ba126
TH
6813
6814 /* tell EH we're leaving & flush EH */
ba6a1308 6815 spin_lock_irqsave(ap->lock, flags);
b51e9e5d 6816 ap->pflags |= ATA_PFLAG_UNLOADING;
ba6a1308 6817 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6818
6819 ata_port_wait_eh(ap);
6820
6821 /* EH is now guaranteed to see UNLOADING, so no new device
6822 * will be attached. Disable all existing devices.
6823 */
ba6a1308 6824 spin_lock_irqsave(ap->lock, flags);
720ba126 6825
41bda9c9
TH
6826 ata_port_for_each_link(link, ap) {
6827 ata_link_for_each_dev(dev, link)
6828 ata_dev_disable(dev);
6829 }
720ba126 6830
ba6a1308 6831 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6832
6833 /* Final freeze & EH. All in-flight commands are aborted. EH
6834 * will be skipped and retrials will be terminated with bad
6835 * target.
6836 */
ba6a1308 6837 spin_lock_irqsave(ap->lock, flags);
720ba126 6838 ata_port_freeze(ap); /* won't be thawed */
ba6a1308 6839 spin_unlock_irqrestore(ap->lock, flags);
720ba126
TH
6840
6841 ata_port_wait_eh(ap);
45a66c1c 6842 cancel_rearming_delayed_work(&ap->hotplug_task);
720ba126 6843
c3cf30a9 6844 skip_eh:
720ba126 6845 /* remove the associated SCSI host */
cca3974e 6846 scsi_remove_host(ap->scsi_host);
720ba126
TH
6847}
6848
0529c159
TH
6849/**
6850 * ata_host_detach - Detach all ports of an ATA host
6851 * @host: Host to detach
6852 *
6853 * Detach all ports of @host.
6854 *
6855 * LOCKING:
6856 * Kernel thread context (may sleep).
6857 */
6858void ata_host_detach(struct ata_host *host)
6859{
6860 int i;
6861
6862 for (i = 0; i < host->n_ports; i++)
6863 ata_port_detach(host->ports[i]);
6864}
6865
1da177e4
LT
6866/**
6867 * ata_std_ports - initialize ioaddr with standard port offsets.
6868 * @ioaddr: IO address structure to be initialized
0baab86b
EF
6869 *
6870 * Utility function which initializes data_addr, error_addr,
6871 * feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
6872 * device_addr, status_addr, and command_addr to standard offsets
6873 * relative to cmd_addr.
6874 *
6875 * Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
1da177e4 6876 */
0baab86b 6877
1da177e4
LT
6878void ata_std_ports(struct ata_ioports *ioaddr)
6879{
6880 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
6881 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
6882 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
6883 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
6884 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
6885 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
6886 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
6887 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
6888 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
6889 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
6890}
6891
0baab86b 6892
374b1873
JG
6893#ifdef CONFIG_PCI
6894
/**
 *	ata_pci_remove_one - PCI layer callback for device removal
 *	@pdev: PCI device that was removed
 *
 *	PCI layer indicates to libata via this hook that hot-unplug or
 *	module unload event has occurred.  Detach all ports.  Resource
 *	release is handled via devres.
 *
 *	LOCKING:
 *	Inherited from PCI layer (may sleep).
 */
void ata_pci_remove_one(struct pci_dev *pdev)
{
	struct device *dev = pci_dev_to_dev(pdev);
	struct ata_host *host = dev_get_drvdata(dev);

	ata_host_detach(host);
}
6913
6914/* move to PCI subsystem */
057ace5e 6915int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
6916{
6917 unsigned long tmp = 0;
6918
6919 switch (bits->width) {
6920 case 1: {
6921 u8 tmp8 = 0;
6922 pci_read_config_byte(pdev, bits->reg, &tmp8);
6923 tmp = tmp8;
6924 break;
6925 }
6926 case 2: {
6927 u16 tmp16 = 0;
6928 pci_read_config_word(pdev, bits->reg, &tmp16);
6929 tmp = tmp16;
6930 break;
6931 }
6932 case 4: {
6933 u32 tmp32 = 0;
6934 pci_read_config_dword(pdev, bits->reg, &tmp32);
6935 tmp = tmp32;
6936 break;
6937 }
6938
6939 default:
6940 return -EINVAL;
6941 }
6942
6943 tmp &= bits->mask;
6944
6945 return (tmp == bits->val) ? 1 : 0;
6946}
9b847548 6947
6ffa01d8 6948#ifdef CONFIG_PM
3c5100c1 6949void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
9b847548
JA
6950{
6951 pci_save_state(pdev);
4c90d971 6952 pci_disable_device(pdev);
500530f6 6953
4c90d971 6954 if (mesg.event == PM_EVENT_SUSPEND)
500530f6 6955 pci_set_power_state(pdev, PCI_D3hot);
9b847548
JA
6956}
6957
553c4aa6 6958int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 6959{
553c4aa6
TH
6960 int rc;
6961
9b847548
JA
6962 pci_set_power_state(pdev, PCI_D0);
6963 pci_restore_state(pdev);
553c4aa6 6964
b878ca5d 6965 rc = pcim_enable_device(pdev);
553c4aa6
TH
6966 if (rc) {
6967 dev_printk(KERN_ERR, &pdev->dev,
6968 "failed to enable device after resume (%d)\n", rc);
6969 return rc;
6970 }
6971
9b847548 6972 pci_set_master(pdev);
553c4aa6 6973 return 0;
500530f6
TH
6974}
6975
3c5100c1 6976int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 6977{
cca3974e 6978 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
6979 int rc = 0;
6980
cca3974e 6981 rc = ata_host_suspend(host, mesg);
500530f6
TH
6982 if (rc)
6983 return rc;
6984
3c5100c1 6985 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
6986
6987 return 0;
6988}
6989
6990int ata_pci_device_resume(struct pci_dev *pdev)
6991{
cca3974e 6992 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 6993 int rc;
500530f6 6994
553c4aa6
TH
6995 rc = ata_pci_device_do_resume(pdev);
6996 if (rc == 0)
6997 ata_host_resume(host);
6998 return rc;
9b847548 6999}
6ffa01d8
TH
7000#endif /* CONFIG_PM */
7001
1da177e4
LT
7002#endif /* CONFIG_PCI */
7003
7004
1da177e4
LT
7005static int __init ata_init(void)
7006{
a8601e5f 7007 ata_probe_timeout *= HZ;
1da177e4
LT
7008 ata_wq = create_workqueue("ata");
7009 if (!ata_wq)
7010 return -ENOMEM;
7011
453b07ac
TH
7012 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7013 if (!ata_aux_wq) {
7014 destroy_workqueue(ata_wq);
7015 return -ENOMEM;
7016 }
7017
1da177e4
LT
7018 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7019 return 0;
7020}
7021
/* Module exit: tear down the workqueues created in ata_init(). */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
7027
/* Register as a subsystem initcall so libata is up before drivers that
 * depend on it probe their hardware. */
subsys_initcall(ata_init);
module_exit(ata_exit);
7030
/* Earliest jiffies value at which the next rate-limited event is
 * allowed; protected by ata_ratelimit_lock (see ata_ratelimit()). */
static unsigned long ratelimit_time;
static DEFINE_SPINLOCK(ata_ratelimit_lock);
7033
7034int ata_ratelimit(void)
7035{
7036 int rc;
7037 unsigned long flags;
7038
7039 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7040
7041 if (time_after(jiffies, ratelimit_time)) {
7042 rc = 1;
7043 ratelimit_time = jiffies + (HZ/5);
7044 } else
7045 rc = 0;
7046
7047 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7048
7049 return rc;
7050}
7051
c22daff4
TH
7052/**
7053 * ata_wait_register - wait until register value changes
7054 * @reg: IO-mapped register
7055 * @mask: Mask to apply to read register value
7056 * @val: Wait condition
7057 * @interval_msec: polling interval in milliseconds
7058 * @timeout_msec: timeout in milliseconds
7059 *
7060 * Waiting for some bits of register to change is a common
7061 * operation for ATA controllers. This function reads 32bit LE
7062 * IO-mapped register @reg and tests for the following condition.
7063 *
7064 * (*@reg & mask) != val
7065 *
7066 * If the condition is met, it returns; otherwise, the process is
7067 * repeated after @interval_msec until timeout.
7068 *
7069 * LOCKING:
7070 * Kernel thread context (may sleep)
7071 *
7072 * RETURNS:
7073 * The final register value.
7074 */
7075u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7076 unsigned long interval_msec,
7077 unsigned long timeout_msec)
7078{
7079 unsigned long timeout;
7080 u32 tmp;
7081
7082 tmp = ioread32(reg);
7083
7084 /* Calculate timeout _after_ the first read to make sure
7085 * preceding writes reach the controller before starting to
7086 * eat away the timeout.
7087 */
7088 timeout = jiffies + (timeout_msec * HZ) / 1000;
7089
7090 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7091 msleep(interval_msec);
7092 tmp = ioread32(reg);
7093 }
7094
7095 return tmp;
7096}
7097
dd5b06c4
TH
7098/*
7099 * Dummy port_ops
7100 */
7101static void ata_dummy_noret(struct ata_port *ap) { }
7102static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
7103static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }
7104
7105static u8 ata_dummy_check_status(struct ata_port *ap)
7106{
7107 return ATA_DRDY;
7108}
7109
7110static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
7111{
7112 return AC_ERR_SYSTEM;
7113}
7114
/*
 * Inert port operations table: status reads always report ATA_DRDY,
 * qc_issue fails every command with AC_ERR_SYSTEM, and everything
 * else is a no-op (see the ata_dummy_* helpers above).
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};
7129
21b0ad4f
TH
/* Minimal port_info wired to the dummy port operations. */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7133
1da177e4
LT
/*
 * libata is essentially a library of internal helper functions for
 * low-level ATA host controller drivers.  As such, the API/ABI is
 * likely to change as new drivers are added and updated.
 * Do not depend on ABI/API stability.
 */

EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
EXPORT_SYMBOL_GPL(sata_deb_timing_long);
EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
EXPORT_SYMBOL_GPL(ata_dummy_port_info);
EXPORT_SYMBOL_GPL(ata_std_bios_param);
EXPORT_SYMBOL_GPL(ata_std_ports);
EXPORT_SYMBOL_GPL(ata_host_init);
EXPORT_SYMBOL_GPL(ata_host_alloc);
EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
EXPORT_SYMBOL_GPL(ata_host_start);
EXPORT_SYMBOL_GPL(ata_host_register);
EXPORT_SYMBOL_GPL(ata_host_activate);
EXPORT_SYMBOL_GPL(ata_host_detach);
EXPORT_SYMBOL_GPL(ata_sg_init);
EXPORT_SYMBOL_GPL(ata_sg_init_one);
EXPORT_SYMBOL_GPL(ata_hsm_move);
EXPORT_SYMBOL_GPL(ata_qc_complete);
EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
EXPORT_SYMBOL_GPL(ata_tf_load);
EXPORT_SYMBOL_GPL(ata_tf_read);
EXPORT_SYMBOL_GPL(ata_noop_dev_select);
EXPORT_SYMBOL_GPL(ata_std_dev_select);
EXPORT_SYMBOL_GPL(sata_print_link_status);
EXPORT_SYMBOL_GPL(ata_tf_to_fis);
EXPORT_SYMBOL_GPL(ata_tf_from_fis);
EXPORT_SYMBOL_GPL(ata_check_status);
EXPORT_SYMBOL_GPL(ata_altstatus);
EXPORT_SYMBOL_GPL(ata_exec_command);
EXPORT_SYMBOL_GPL(ata_port_start);
EXPORT_SYMBOL_GPL(ata_sff_port_start);
EXPORT_SYMBOL_GPL(ata_interrupt);
EXPORT_SYMBOL_GPL(ata_do_set_mode);
EXPORT_SYMBOL_GPL(ata_data_xfer);
EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
EXPORT_SYMBOL_GPL(ata_std_qc_defer);
EXPORT_SYMBOL_GPL(ata_qc_prep);
EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
EXPORT_SYMBOL_GPL(ata_bmdma_setup);
EXPORT_SYMBOL_GPL(ata_bmdma_start);
EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
EXPORT_SYMBOL_GPL(ata_bmdma_status);
EXPORT_SYMBOL_GPL(ata_bmdma_stop);
EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
EXPORT_SYMBOL_GPL(ata_port_probe);
EXPORT_SYMBOL_GPL(ata_dev_disable);
EXPORT_SYMBOL_GPL(sata_set_spd);
EXPORT_SYMBOL_GPL(sata_link_debounce);
EXPORT_SYMBOL_GPL(sata_link_resume);
EXPORT_SYMBOL_GPL(sata_phy_reset);
EXPORT_SYMBOL_GPL(__sata_phy_reset);
EXPORT_SYMBOL_GPL(ata_bus_reset);
EXPORT_SYMBOL_GPL(ata_std_prereset);
EXPORT_SYMBOL_GPL(ata_std_softreset);
EXPORT_SYMBOL_GPL(sata_link_hardreset);
EXPORT_SYMBOL_GPL(sata_std_hardreset);
EXPORT_SYMBOL_GPL(ata_std_postreset);
EXPORT_SYMBOL_GPL(ata_dev_classify);
EXPORT_SYMBOL_GPL(ata_dev_pair);
EXPORT_SYMBOL_GPL(ata_port_disable);
EXPORT_SYMBOL_GPL(ata_ratelimit);
EXPORT_SYMBOL_GPL(ata_wait_register);
EXPORT_SYMBOL_GPL(ata_busy_sleep);
EXPORT_SYMBOL_GPL(ata_wait_ready);
EXPORT_SYMBOL_GPL(ata_port_queue_task);
EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
EXPORT_SYMBOL_GPL(ata_host_intr);
EXPORT_SYMBOL_GPL(sata_scr_valid);
EXPORT_SYMBOL_GPL(sata_scr_read);
EXPORT_SYMBOL_GPL(sata_scr_write);
EXPORT_SYMBOL_GPL(sata_scr_write_flush);
EXPORT_SYMBOL_GPL(ata_link_online);
EXPORT_SYMBOL_GPL(ata_link_offline);
/* host-level PM entry points exist only when power management is built in */
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_host_suspend);
EXPORT_SYMBOL_GPL(ata_host_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_id_string);
EXPORT_SYMBOL_GPL(ata_id_c_string);
EXPORT_SYMBOL_GPL(ata_id_to_dma_mode);
EXPORT_SYMBOL_GPL(ata_scsi_simulate);

EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
EXPORT_SYMBOL_GPL(ata_timing_compute);
EXPORT_SYMBOL_GPL(ata_timing_merge);

/* PCI glue, including its PM helpers, depends on CONFIG_PCI */
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(pci_test_config_bits);
EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
EXPORT_SYMBOL_GPL(ata_pci_init_one);
EXPORT_SYMBOL_GPL(ata_pci_remove_one);
#ifdef CONFIG_PM
EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
EXPORT_SYMBOL_GPL(ata_pci_device_resume);
#endif /* CONFIG_PM */
EXPORT_SYMBOL_GPL(ata_pci_default_filter);
EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
#endif /* CONFIG_PCI */

/* error-handling (EH) interface */
EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
EXPORT_SYMBOL_GPL(ata_port_desc);
#ifdef CONFIG_PCI
EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
#endif /* CONFIG_PCI */
EXPORT_SYMBOL_GPL(ata_eng_timeout);
EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
EXPORT_SYMBOL_GPL(ata_link_abort);
EXPORT_SYMBOL_GPL(ata_port_abort);
EXPORT_SYMBOL_GPL(ata_port_freeze);
EXPORT_SYMBOL_GPL(sata_async_notification);
EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
EXPORT_SYMBOL_GPL(ata_do_eh);
EXPORT_SYMBOL_GPL(ata_irq_on);
EXPORT_SYMBOL_GPL(ata_dev_try_classify);

EXPORT_SYMBOL_GPL(ata_cable_40wire);
EXPORT_SYMBOL_GPL(ata_cable_80wire);
EXPORT_SYMBOL_GPL(ata_cable_unknown);
EXPORT_SYMBOL_GPL(ata_cable_sata);