]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/ata/libata-core.c
libata: xfer_mask is unsigned long not unsigned int
[net-next-2.6.git] / drivers / ata / libata-core.c
CommitLineData
1da177e4 1/*
af36d7f0
JG
2 * libata-core.c - helper library for ATA
3 *
4 * Maintained by: Jeff Garzik <jgarzik@pobox.com>
5 * Please ALWAYS copy linux-ide@vger.kernel.org
6 * on emails.
7 *
8 * Copyright 2003-2004 Red Hat, Inc. All rights reserved.
9 * Copyright 2003-2004 Jeff Garzik
10 *
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License as published by
14 * the Free Software Foundation; either version 2, or (at your option)
15 * any later version.
16 *
17 * This program is distributed in the hope that it will be useful,
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; see the file COPYING. If not, write to
24 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 *
26 *
27 * libata documentation is available via 'make {ps|pdf}docs',
28 * as Documentation/DocBook/libata.*
29 *
30 * Hardware documentation available from http://www.t13.org/ and
31 * http://www.sata-io.org/
32 *
92c52c52
AC
33 * Standards documents from:
34 * http://www.t13.org (ATA standards, PCI DMA IDE spec)
35 * http://www.t10.org (SCSI MMC - for ATAPI MMC)
36 * http://www.sata-io.org (SATA)
37 * http://www.compactflash.org (CF)
38 * http://www.qic.org (QIC157 - Tape and DSC)
39 * http://www.ce-ata.org (CE-ATA: not supported)
40 *
1da177e4
LT
41 */
42
1da177e4
LT
43#include <linux/kernel.h>
44#include <linux/module.h>
45#include <linux/pci.h>
46#include <linux/init.h>
47#include <linux/list.h>
48#include <linux/mm.h>
49#include <linux/highmem.h>
50#include <linux/spinlock.h>
51#include <linux/blkdev.h>
52#include <linux/delay.h>
53#include <linux/timer.h>
54#include <linux/interrupt.h>
55#include <linux/completion.h>
56#include <linux/suspend.h>
57#include <linux/workqueue.h>
67846b30 58#include <linux/jiffies.h>
378f058c 59#include <linux/scatterlist.h>
2dcb407e 60#include <linux/io.h>
1da177e4 61#include <scsi/scsi.h>
193515d5 62#include <scsi/scsi_cmnd.h>
1da177e4
LT
63#include <scsi/scsi_host.h>
64#include <linux/libata.h>
1da177e4
LT
65#include <asm/semaphore.h>
66#include <asm/byteorder.h>
140b5e59 67#include <linux/cdrom.h>
1da177e4
LT
68
69#include "libata.h"
70
fda0efc5 71
/* debounce timing parameters in msecs { interval, duration, timeout } */
const unsigned long sata_deb_timing_normal[] = { 5, 100, 2000 };
const unsigned long sata_deb_timing_hotplug[] = { 25, 500, 2000 };
const unsigned long sata_deb_timing_long[] = { 100, 2000, 5000 };

/* forward declarations for helpers defined later in this file */
static unsigned int ata_dev_init_params(struct ata_device *dev,
					u16 heads, u16 sectors);
static unsigned int ata_dev_set_xfermode(struct ata_device *dev);
static unsigned int ata_dev_set_feature(struct ata_device *dev,
					u8 enable, u8 feature);
static void ata_dev_xfermask(struct ata_device *dev);
static unsigned long ata_dev_blacklisted(const struct ata_device *dev);

/* NOTE(review): appears to be a running ID handed out to ports;
 * confirm against the callers that increment it.
 */
unsigned int ata_print_id = 1;
static struct workqueue_struct *ata_wq;

struct workqueue_struct *ata_aux_wq;

/* Module parameters.  All are read-only at runtime (perm 0444)
 * except ignore_hpa which is writable by root (0644).
 */
int atapi_enabled = 1;
module_param(atapi_enabled, int, 0444);
MODULE_PARM_DESC(atapi_enabled, "Enable discovery of ATAPI devices (0=off, 1=on)");

int atapi_dmadir = 0;
module_param(atapi_dmadir, int, 0444);
MODULE_PARM_DESC(atapi_dmadir, "Enable ATAPI DMADIR bridge support (0=off, 1=on)");

int atapi_passthru16 = 1;
module_param(atapi_passthru16, int, 0444);
MODULE_PARM_DESC(atapi_passthru16, "Enable ATA_16 passthru for ATAPI devices; on by default (0=off, 1=on)");

int libata_fua = 0;
module_param_named(fua, libata_fua, int, 0444);
MODULE_PARM_DESC(fua, "FUA support (0=off, 1=on)");

static int ata_ignore_hpa;
module_param_named(ignore_hpa, ata_ignore_hpa, int, 0644);
MODULE_PARM_DESC(ignore_hpa, "Ignore HPA limit (0=keep BIOS limits, 1=ignore limits, using full disk)");

static int libata_dma_mask = ATA_DMA_MASK_ATA|ATA_DMA_MASK_ATAPI|ATA_DMA_MASK_CFA;
module_param_named(dma, libata_dma_mask, int, 0444);
MODULE_PARM_DESC(dma, "DMA enable/disable (0x1==ATA, 0x2==ATAPI, 0x4==CF)");

static int ata_probe_timeout = ATA_TMOUT_INTERNAL / HZ;
module_param(ata_probe_timeout, int, 0444);
MODULE_PARM_DESC(ata_probe_timeout, "Set ATA probing timeout (seconds)");

int libata_noacpi = 0;
module_param_named(noacpi, libata_noacpi, int, 0444);
MODULE_PARM_DESC(noacpi, "Disables the use of ACPI in probe/suspend/resume when set");

int libata_allow_tpm = 0;
module_param_named(allow_tpm, libata_allow_tpm, int, 0444);
MODULE_PARM_DESC(allow_tpm, "Permit the use of TPM commands");

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);
130
0baab86b 131
1da177e4
LT
132/**
133 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
134 * @tf: Taskfile to convert
1da177e4 135 * @pmp: Port multiplier port
9977126c
TH
136 * @is_cmd: This FIS is for command
137 * @fis: Buffer into which data will output
1da177e4
LT
138 *
139 * Converts a standard ATA taskfile to a Serial ATA
140 * FIS structure (Register - Host to Device).
141 *
142 * LOCKING:
143 * Inherited from caller.
144 */
9977126c 145void ata_tf_to_fis(const struct ata_taskfile *tf, u8 pmp, int is_cmd, u8 *fis)
1da177e4 146{
9977126c
TH
147 fis[0] = 0x27; /* Register - Host to Device FIS */
148 fis[1] = pmp & 0xf; /* Port multiplier number*/
149 if (is_cmd)
150 fis[1] |= (1 << 7); /* bit 7 indicates Command FIS */
151
1da177e4
LT
152 fis[2] = tf->command;
153 fis[3] = tf->feature;
154
155 fis[4] = tf->lbal;
156 fis[5] = tf->lbam;
157 fis[6] = tf->lbah;
158 fis[7] = tf->device;
159
160 fis[8] = tf->hob_lbal;
161 fis[9] = tf->hob_lbam;
162 fis[10] = tf->hob_lbah;
163 fis[11] = tf->hob_feature;
164
165 fis[12] = tf->nsect;
166 fis[13] = tf->hob_nsect;
167 fis[14] = 0;
168 fis[15] = tf->ctl;
169
170 fis[16] = 0;
171 fis[17] = 0;
172 fis[18] = 0;
173 fis[19] = 0;
174}
175
176/**
177 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
178 * @fis: Buffer from which data will be input
179 * @tf: Taskfile to output
180 *
e12a1be6 181 * Converts a serial ATA FIS structure to a standard ATA taskfile.
1da177e4
LT
182 *
183 * LOCKING:
184 * Inherited from caller.
185 */
186
057ace5e 187void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf)
1da177e4
LT
188{
189 tf->command = fis[2]; /* status */
190 tf->feature = fis[3]; /* error */
191
192 tf->lbal = fis[4];
193 tf->lbam = fis[5];
194 tf->lbah = fis[6];
195 tf->device = fis[7];
196
197 tf->hob_lbal = fis[8];
198 tf->hob_lbam = fis[9];
199 tf->hob_lbah = fis[10];
200
201 tf->nsect = fis[12];
202 tf->hob_nsect = fis[13];
203}
204
/* Read/write opcode lookup table used by ata_rwcmd_protocol().
 * Indexed as base + fua*4 + lba48*2 + write, where base is
 * 0 for PIO multi, 8 for PIO and 16 for DMA.  A zero entry
 * means the combination is not supported.
 */
static const u8 ata_rw_cmds[] = {
	/* pio multi */
	ATA_CMD_READ_MULTI,
	ATA_CMD_WRITE_MULTI,
	ATA_CMD_READ_MULTI_EXT,
	ATA_CMD_WRITE_MULTI_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_MULTI_FUA_EXT,
	/* pio */
	ATA_CMD_PIO_READ,
	ATA_CMD_PIO_WRITE,
	ATA_CMD_PIO_READ_EXT,
	ATA_CMD_PIO_WRITE_EXT,
	0,
	0,
	0,
	0,
	/* dma */
	ATA_CMD_READ,
	ATA_CMD_WRITE,
	ATA_CMD_READ_EXT,
	ATA_CMD_WRITE_EXT,
	0,
	0,
	0,
	ATA_CMD_WRITE_FUA_EXT
};
1da177e4
LT
234
235/**
8cbd6df1 236 * ata_rwcmd_protocol - set taskfile r/w commands and protocol
bd056d7e
TH
237 * @tf: command to examine and configure
238 * @dev: device tf belongs to
1da177e4 239 *
2e9edbf8 240 * Examine the device configuration and tf->flags to calculate
8cbd6df1 241 * the proper read/write commands and protocol to use.
1da177e4
LT
242 *
243 * LOCKING:
244 * caller.
245 */
bd056d7e 246static int ata_rwcmd_protocol(struct ata_taskfile *tf, struct ata_device *dev)
1da177e4 247{
9a3dccc4 248 u8 cmd;
1da177e4 249
9a3dccc4 250 int index, fua, lba48, write;
2e9edbf8 251
9a3dccc4 252 fua = (tf->flags & ATA_TFLAG_FUA) ? 4 : 0;
8cbd6df1
AL
253 lba48 = (tf->flags & ATA_TFLAG_LBA48) ? 2 : 0;
254 write = (tf->flags & ATA_TFLAG_WRITE) ? 1 : 0;
1da177e4 255
8cbd6df1
AL
256 if (dev->flags & ATA_DFLAG_PIO) {
257 tf->protocol = ATA_PROT_PIO;
9a3dccc4 258 index = dev->multi_count ? 0 : 8;
9af5c9c9 259 } else if (lba48 && (dev->link->ap->flags & ATA_FLAG_PIO_LBA48)) {
8d238e01
AC
260 /* Unable to use DMA due to host limitation */
261 tf->protocol = ATA_PROT_PIO;
0565c26d 262 index = dev->multi_count ? 0 : 8;
8cbd6df1
AL
263 } else {
264 tf->protocol = ATA_PROT_DMA;
9a3dccc4 265 index = 16;
8cbd6df1 266 }
1da177e4 267
9a3dccc4
TH
268 cmd = ata_rw_cmds[index + fua + lba48 + write];
269 if (cmd) {
270 tf->command = cmd;
271 return 0;
272 }
273 return -1;
1da177e4
LT
274}
275
35b649fe
TH
276/**
277 * ata_tf_read_block - Read block address from ATA taskfile
278 * @tf: ATA taskfile of interest
279 * @dev: ATA device @tf belongs to
280 *
281 * LOCKING:
282 * None.
283 *
284 * Read block address from @tf. This function can handle all
285 * three address formats - LBA, LBA48 and CHS. tf->protocol and
286 * flags select the address format to use.
287 *
288 * RETURNS:
289 * Block address read from @tf.
290 */
291u64 ata_tf_read_block(struct ata_taskfile *tf, struct ata_device *dev)
292{
293 u64 block = 0;
294
295 if (tf->flags & ATA_TFLAG_LBA) {
296 if (tf->flags & ATA_TFLAG_LBA48) {
297 block |= (u64)tf->hob_lbah << 40;
298 block |= (u64)tf->hob_lbam << 32;
299 block |= tf->hob_lbal << 24;
300 } else
301 block |= (tf->device & 0xf) << 24;
302
303 block |= tf->lbah << 16;
304 block |= tf->lbam << 8;
305 block |= tf->lbal;
306 } else {
307 u32 cyl, head, sect;
308
309 cyl = tf->lbam | (tf->lbah << 8);
310 head = tf->device & 0xf;
311 sect = tf->lbal;
312
313 block = (cyl * dev->heads + head) * dev->sectors + sect;
314 }
315
316 return block;
317}
318
bd056d7e
TH
/**
 *	ata_build_rw_tf - Build ATA taskfile for given read/write request
 *	@tf: Target ATA taskfile
 *	@dev: ATA device @tf belongs to
 *	@block: Block address
 *	@n_block: Number of blocks
 *	@tf_flags: RW/FUA etc...
 *	@tag: tag
 *
 *	LOCKING:
 *	None.
 *
 *	Build ATA taskfile @tf for read/write request described by
 *	@block, @n_block, @tf_flags and @tag on @dev.  Tries NCQ
 *	first, then plain LBA28/LBA48, then CHS as a last resort.
 *
 *	RETURNS:
 *
 *	0 on success, -ERANGE if the request is too large for @dev,
 *	-EINVAL if the request is invalid.
 */
int ata_build_rw_tf(struct ata_taskfile *tf, struct ata_device *dev,
		    u64 block, u32 n_block, unsigned int tf_flags,
		    unsigned int tag)
{
	tf->flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	tf->flags |= tf_flags;

	if (ata_ncq_enabled(dev) && likely(tag != ATA_TAG_INTERNAL)) {
		/* yay, NCQ */
		if (!lba_48_ok(block, n_block))
			return -ERANGE;

		tf->protocol = ATA_PROT_NCQ;
		tf->flags |= ATA_TFLAG_LBA | ATA_TFLAG_LBA48;

		if (tf->flags & ATA_TFLAG_WRITE)
			tf->command = ATA_CMD_FPDMA_WRITE;
		else
			tf->command = ATA_CMD_FPDMA_READ;

		/* NCQ register layout: tag goes in nsect bits 7:3,
		 * sector count in the feature/hob_feature pair.
		 */
		tf->nsect = tag << 3;
		tf->hob_feature = (n_block >> 8) & 0xff;
		tf->feature = n_block & 0xff;

		tf->hob_lbah = (block >> 40) & 0xff;
		tf->hob_lbam = (block >> 32) & 0xff;
		tf->hob_lbal = (block >> 24) & 0xff;
		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device = 1 << 6;
		if (tf->flags & ATA_TFLAG_FUA)
			tf->device |= 1 << 7;
	} else if (dev->flags & ATA_DFLAG_LBA) {
		tf->flags |= ATA_TFLAG_LBA;

		if (lba_28_ok(block, n_block)) {
			/* use LBA28 */
			tf->device |= (block >> 24) & 0xf;
		} else if (lba_48_ok(block, n_block)) {
			if (!(dev->flags & ATA_DFLAG_LBA48))
				return -ERANGE;

			/* use LBA48 */
			tf->flags |= ATA_TFLAG_LBA48;

			tf->hob_nsect = (n_block >> 8) & 0xff;

			tf->hob_lbah = (block >> 40) & 0xff;
			tf->hob_lbam = (block >> 32) & 0xff;
			tf->hob_lbal = (block >> 24) & 0xff;
		} else
			/* request too large even for LBA48 */
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		tf->nsect = n_block & 0xff;

		tf->lbah = (block >> 16) & 0xff;
		tf->lbam = (block >> 8) & 0xff;
		tf->lbal = block & 0xff;

		tf->device |= ATA_LBA;
	} else {
		/* CHS */
		u32 sect, head, cyl, track;

		/* The request -may- be too large for CHS addressing. */
		if (!lba_28_ok(block, n_block))
			return -ERANGE;

		if (unlikely(ata_rwcmd_protocol(tf, dev) < 0))
			return -EINVAL;

		/* Convert LBA to CHS */
		track = (u32)block / dev->sectors;
		cyl = track / dev->heads;
		head = track % dev->heads;
		sect = (u32)block % dev->sectors + 1;

		DPRINTK("block %u track %u cyl %u head %u sect %u\n",
			(u32)block, track, cyl, head, sect);

		/* Check whether the converted CHS can fit.
		   Cylinder: 0-65535
		   Head: 0-15
		   Sector: 1-255*/
		if ((cyl >> 16) || (head >> 4) || (sect >> 8) || (!sect))
			return -ERANGE;

		tf->nsect = n_block & 0xff; /* Sector count 0 means 256 sectors */
		tf->lbal = sect;
		tf->lbam = cyl;
		tf->lbah = cyl >> 8;
		tf->device |= head;
	}

	return 0;
}
441
cb95d562
TH
442/**
443 * ata_pack_xfermask - Pack pio, mwdma and udma masks into xfer_mask
444 * @pio_mask: pio_mask
445 * @mwdma_mask: mwdma_mask
446 * @udma_mask: udma_mask
447 *
448 * Pack @pio_mask, @mwdma_mask and @udma_mask into a single
449 * unsigned int xfer_mask.
450 *
451 * LOCKING:
452 * None.
453 *
454 * RETURNS:
455 * Packed xfer_mask.
456 */
7dc951ae
TH
457unsigned long ata_pack_xfermask(unsigned long pio_mask,
458 unsigned long mwdma_mask,
459 unsigned long udma_mask)
cb95d562
TH
460{
461 return ((pio_mask << ATA_SHIFT_PIO) & ATA_MASK_PIO) |
462 ((mwdma_mask << ATA_SHIFT_MWDMA) & ATA_MASK_MWDMA) |
463 ((udma_mask << ATA_SHIFT_UDMA) & ATA_MASK_UDMA);
464}
465
c0489e4e
TH
466/**
467 * ata_unpack_xfermask - Unpack xfer_mask into pio, mwdma and udma masks
468 * @xfer_mask: xfer_mask to unpack
469 * @pio_mask: resulting pio_mask
470 * @mwdma_mask: resulting mwdma_mask
471 * @udma_mask: resulting udma_mask
472 *
473 * Unpack @xfer_mask into @pio_mask, @mwdma_mask and @udma_mask.
474 * Any NULL distination masks will be ignored.
475 */
7dc951ae
TH
476void ata_unpack_xfermask(unsigned long xfer_mask, unsigned long *pio_mask,
477 unsigned long *mwdma_mask, unsigned long *udma_mask)
c0489e4e
TH
478{
479 if (pio_mask)
480 *pio_mask = (xfer_mask & ATA_MASK_PIO) >> ATA_SHIFT_PIO;
481 if (mwdma_mask)
482 *mwdma_mask = (xfer_mask & ATA_MASK_MWDMA) >> ATA_SHIFT_MWDMA;
483 if (udma_mask)
484 *udma_mask = (xfer_mask & ATA_MASK_UDMA) >> ATA_SHIFT_UDMA;
485}
486
/* Maps each transfer-mode group (PIO/MWDMA/UDMA) between its bit range
 * in an xfer_mask and its XFER_* code range: @shift is the group's
 * first bit in the mask, @bits the number of modes in the group and
 * @base the XFER_* code of the group's mode 0.  Terminated by an
 * entry with shift == -1.
 */
static const struct ata_xfer_ent {
	int shift, bits;
	u8 base;
} ata_xfer_tbl[] = {
	{ ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 },
	{ ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 },
	{ ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 },
	{ -1, },
};
496
497/**
498 * ata_xfer_mask2mode - Find matching XFER_* for the given xfer_mask
499 * @xfer_mask: xfer_mask of interest
500 *
501 * Return matching XFER_* value for @xfer_mask. Only the highest
502 * bit of @xfer_mask is considered.
503 *
504 * LOCKING:
505 * None.
506 *
507 * RETURNS:
70cd071e 508 * Matching XFER_* value, 0xff if no match found.
cb95d562 509 */
7dc951ae 510u8 ata_xfer_mask2mode(unsigned long xfer_mask)
cb95d562
TH
511{
512 int highbit = fls(xfer_mask) - 1;
513 const struct ata_xfer_ent *ent;
514
515 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
516 if (highbit >= ent->shift && highbit < ent->shift + ent->bits)
517 return ent->base + highbit - ent->shift;
70cd071e 518 return 0xff;
cb95d562
TH
519}
520
521/**
522 * ata_xfer_mode2mask - Find matching xfer_mask for XFER_*
523 * @xfer_mode: XFER_* of interest
524 *
525 * Return matching xfer_mask for @xfer_mode.
526 *
527 * LOCKING:
528 * None.
529 *
530 * RETURNS:
531 * Matching xfer_mask, 0 if no match found.
532 */
7dc951ae 533unsigned long ata_xfer_mode2mask(u8 xfer_mode)
cb95d562
TH
534{
535 const struct ata_xfer_ent *ent;
536
537 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
538 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
70cd071e
TH
539 return ((2 << (ent->shift + xfer_mode - ent->base)) - 1)
540 & ~((1 << ent->shift) - 1);
cb95d562
TH
541 return 0;
542}
543
544/**
545 * ata_xfer_mode2shift - Find matching xfer_shift for XFER_*
546 * @xfer_mode: XFER_* of interest
547 *
548 * Return matching xfer_shift for @xfer_mode.
549 *
550 * LOCKING:
551 * None.
552 *
553 * RETURNS:
554 * Matching xfer_shift, -1 if no match found.
555 */
7dc951ae 556int ata_xfer_mode2shift(unsigned long xfer_mode)
cb95d562
TH
557{
558 const struct ata_xfer_ent *ent;
559
560 for (ent = ata_xfer_tbl; ent->shift >= 0; ent++)
561 if (xfer_mode >= ent->base && xfer_mode < ent->base + ent->bits)
562 return ent->shift;
563 return -1;
564}
565
/**
 *	ata_mode_string - convert xfer_mask to string
 *	@xfer_mask: mask of bits supported; only highest bit counts.
 *
 *	Determine string which represents the highest speed
 *	(highest bit in @xfer_mask).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Constant C string representing highest speed listed in
 *	@xfer_mask, or the constant C string "<n/a>".
 */
const char *ata_mode_string(unsigned long xfer_mask)
{
	/* indexed by bit position in an xfer_mask: PIO, then MWDMA,
	 * then UDMA modes.
	 */
	static const char * const xfer_mode_str[] = {
		"PIO0",
		"PIO1",
		"PIO2",
		"PIO3",
		"PIO4",
		"PIO5",
		"PIO6",
		"MWDMA0",
		"MWDMA1",
		"MWDMA2",
		"MWDMA3",
		"MWDMA4",
		"UDMA/16",
		"UDMA/25",
		"UDMA/33",
		"UDMA/44",
		"UDMA/66",
		"UDMA/100",
		"UDMA/133",
		"UDMA7",
	};
	int highbit = fls(xfer_mask) - 1;

	if (highbit < 0 || highbit >= ARRAY_SIZE(xfer_mode_str))
		return "<n/a>";
	return xfer_mode_str[highbit];
}
611
/* Map a SATA link speed number (1-based, from SStatus/SControl SPD
 * field) to a human-readable string; "<unknown>" for out-of-range.
 */
static const char *sata_spd_string(unsigned int spd)
{
	static const char * const spd_str[] = {
		"1.5 Gbps",
		"3.0 Gbps",
	};

	if (spd == 0 || (spd - 1) >= ARRAY_SIZE(spd_str))
		return "<unknown>";

	return spd_str[spd - 1];
}
623
3373efd8 624void ata_dev_disable(struct ata_device *dev)
0b8efb0a 625{
09d7f9b0 626 if (ata_dev_enabled(dev)) {
9af5c9c9 627 if (ata_msg_drv(dev->link->ap))
09d7f9b0 628 ata_dev_printk(dev, KERN_WARNING, "disabled\n");
562f0c2d 629 ata_acpi_on_disable(dev);
4ae72a1e
TH
630 ata_down_xfermask_limit(dev, ATA_DNXFER_FORCE_PIO0 |
631 ATA_DNXFER_QUIET);
0b8efb0a
TH
632 dev->class++;
633 }
634}
635
/**
 *	ata_dev_set_dipm - configure Device Initiated Power Management
 *	@dev: target device
 *	@policy: link power management policy to apply
 *
 *	Program the link's SControl IPM bits according to @policy and,
 *	for MIN_POWER on a DIPM-capable device, issue SET FEATURES to
 *	enable DIPM.
 *
 *	RETURNS:
 *	0 on success, -EINVAL if the port lacks ATA_FLAG_IPM or the
 *	device is not enabled, otherwise the error from SCR access.
 */
static int ata_dev_set_dipm(struct ata_device *dev, enum link_pm policy)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u32 scontrol;
	unsigned int err_mask;
	int rc;

	/*
	 * disallow DIPM for drivers which haven't set
	 * ATA_FLAG_IPM.  This is because when DIPM is enabled,
	 * phy ready will be set in the interrupt status on
	 * state changes, which will cause some drivers to
	 * think there are errors - additionally drivers will
	 * need to disable hot plug.
	 */
	if (!(ap->flags & ATA_FLAG_IPM) || !ata_dev_enabled(dev)) {
		ap->pm_policy = NOT_AVAILABLE;
		return -EINVAL;
	}

	/*
	 * For DIPM, we will only enable it for the
	 * min_power setting.
	 *
	 * Why?  Because Disks are too stupid to know that
	 * If the host rejects a request to go to SLUMBER
	 * they should retry at PARTIAL, and instead it
	 * just would give up.  So, for medium_power to
	 * work at all, we need to only allow HIPM.
	 */
	rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
	if (rc)
		return rc;

	/* SControl bits 9:8 disallow transitions to PARTIAL/SLUMBER
	 * when set; each case below programs them per @policy.
	 */
	switch (policy) {
	case MIN_POWER:
		/* no restrictions on IPM transitions */
		scontrol &= ~(0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/* enable DIPM */
		if (dev->flags & ATA_DFLAG_DIPM)
			err_mask = ata_dev_set_feature(dev,
					SETFEATURES_SATA_ENABLE, SATA_DIPM);
		break;
	case MEDIUM_POWER:
		/* allow IPM to PARTIAL */
		scontrol &= ~(0x1 << 8);
		scontrol |= (0x2 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow transitions to SLUMBER, which effectively
		 * disable DIPM if it does not support PARTIAL
		 */
		break;
	case NOT_AVAILABLE:
	case MAX_PERFORMANCE:
		/* disable all IPM transitions */
		scontrol |= (0x3 << 8);
		rc = sata_scr_write(link, SCR_CONTROL, scontrol);
		if (rc)
			return rc;

		/*
		 * we don't have to disable DIPM since IPM flags
		 * disallow all transitions which effectively
		 * disable DIPM anyway.
		 */
		break;
	}

	/* FIXME: handle SET FEATURES failure */
	(void) err_mask;

	return 0;
}
719
720/**
721 * ata_dev_enable_pm - enable SATA interface power management
48166fd9
SH
722 * @dev: device to enable power management
723 * @policy: the link power management policy
ca77329f
KCA
724 *
725 * Enable SATA Interface power management. This will enable
726 * Device Interface Power Management (DIPM) for min_power
727 * policy, and then call driver specific callbacks for
728 * enabling Host Initiated Power management.
729 *
730 * Locking: Caller.
731 * Returns: -EINVAL if IPM is not supported, 0 otherwise.
732 */
733void ata_dev_enable_pm(struct ata_device *dev, enum link_pm policy)
734{
735 int rc = 0;
736 struct ata_port *ap = dev->link->ap;
737
738 /* set HIPM first, then DIPM */
739 if (ap->ops->enable_pm)
740 rc = ap->ops->enable_pm(ap, policy);
741 if (rc)
742 goto enable_pm_out;
743 rc = ata_dev_set_dipm(dev, policy);
744
745enable_pm_out:
746 if (rc)
747 ap->pm_policy = MAX_PERFORMANCE;
748 else
749 ap->pm_policy = policy;
750 return /* rc */; /* hopefully we can use 'rc' eventually */
751}
752
1992a5ed 753#ifdef CONFIG_PM
ca77329f
KCA
754/**
755 * ata_dev_disable_pm - disable SATA interface power management
48166fd9 756 * @dev: device to disable power management
ca77329f
KCA
757 *
758 * Disable SATA Interface power management. This will disable
759 * Device Interface Power Management (DIPM) without changing
760 * policy, call driver specific callbacks for disabling Host
761 * Initiated Power management.
762 *
763 * Locking: Caller.
764 * Returns: void
765 */
766static void ata_dev_disable_pm(struct ata_device *dev)
767{
768 struct ata_port *ap = dev->link->ap;
769
770 ata_dev_set_dipm(dev, MAX_PERFORMANCE);
771 if (ap->ops->disable_pm)
772 ap->ops->disable_pm(ap);
773}
1992a5ed 774#endif /* CONFIG_PM */
ca77329f
KCA
775
/* Record the requested link PM policy on @ap and kick the EH thread to
 * apply it: ATA_EHI_LPM requests the PM action, ATA_EHI_NO_AUTOPSY
 * suppresses error autopsy for this EH round.  The schedule call must
 * come last, after the eh_info fields are set.
 */
void ata_lpm_schedule(struct ata_port *ap, enum link_pm policy)
{
	ap->pm_policy = policy;
	ap->link.eh_info.action |= ATA_EHI_LPM;
	ap->link.eh_info.flags |= ATA_EHI_NO_AUTOPSY;
	ata_port_schedule_eh(ap);
}
783
1992a5ed 784#ifdef CONFIG_PM
ca77329f
KCA
785static void ata_lpm_enable(struct ata_host *host)
786{
787 struct ata_link *link;
788 struct ata_port *ap;
789 struct ata_device *dev;
790 int i;
791
792 for (i = 0; i < host->n_ports; i++) {
793 ap = host->ports[i];
794 ata_port_for_each_link(link, ap) {
795 ata_link_for_each_dev(dev, link)
796 ata_dev_disable_pm(dev);
797 }
798 }
799}
800
801static void ata_lpm_disable(struct ata_host *host)
802{
803 int i;
804
805 for (i = 0; i < host->n_ports; i++) {
806 struct ata_port *ap = host->ports[i];
807 ata_lpm_schedule(ap, ap->pm_policy);
808 }
809}
1992a5ed 810#endif /* CONFIG_PM */
ca77329f
KCA
811
812
/**
 *	ata_devchk - PATA device presence detection
 *	@ap: ATA channel to examine
 *	@device: Device to examine (starting at zero)
 *
 *	This technique was originally described in
 *	Hale Landis's ATADRVR (www.ata-atapi.com), and
 *	later found its way into the ATA/ATAPI spec.
 *
 *	Write a pattern to the ATA shadow registers,
 *	and if a device is present, it will respond by
 *	correctly storing and echoing back the
 *	ATA shadow register contents.
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	1 if a device responded, 0 otherwise.
 */

static unsigned int ata_devchk(struct ata_port *ap, unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	/* write alternating patterns three times; only the last pair
	 * is read back, the repeats guard against stale latched values
	 */
	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	iowrite8(0xaa, ioaddr->nsect_addr);
	iowrite8(0x55, ioaddr->lbal_addr);

	iowrite8(0x55, ioaddr->nsect_addr);
	iowrite8(0xaa, ioaddr->lbal_addr);

	nsect = ioread8(ioaddr->nsect_addr);
	lbal = ioread8(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}
855
1da177e4
LT
856/**
857 * ata_dev_classify - determine device type based on ATA-spec signature
858 * @tf: ATA taskfile register set for device to be identified
859 *
860 * Determine from taskfile register contents whether a device is
861 * ATA or ATAPI, as per "Signature and persistence" section
862 * of ATA/PI spec (volume 1, sect 5.14).
863 *
864 * LOCKING:
865 * None.
866 *
867 * RETURNS:
633273a3
TH
868 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, %ATA_DEV_PMP or
869 * %ATA_DEV_UNKNOWN the event of failure.
1da177e4 870 */
057ace5e 871unsigned int ata_dev_classify(const struct ata_taskfile *tf)
1da177e4
LT
872{
873 /* Apple's open source Darwin code hints that some devices only
874 * put a proper signature into the LBA mid/high registers,
875 * So, we only check those. It's sufficient for uniqueness.
633273a3
TH
876 *
877 * ATA/ATAPI-7 (d1532v1r1: Feb. 19, 2003) specified separate
878 * signatures for ATA and ATAPI devices attached on SerialATA,
879 * 0x3c/0xc3 and 0x69/0x96 respectively. However, SerialATA
880 * spec has never mentioned about using different signatures
881 * for ATA/ATAPI devices. Then, Serial ATA II: Port
882 * Multiplier specification began to use 0x69/0x96 to identify
883 * port multpliers and 0x3c/0xc3 to identify SEMB device.
884 * ATA/ATAPI-7 dropped descriptions about 0x3c/0xc3 and
885 * 0x69/0x96 shortly and described them as reserved for
886 * SerialATA.
887 *
888 * We follow the current spec and consider that 0x69/0x96
889 * identifies a port multiplier and 0x3c/0xc3 a SEMB device.
1da177e4 890 */
633273a3 891 if ((tf->lbam == 0) && (tf->lbah == 0)) {
1da177e4
LT
892 DPRINTK("found ATA device by sig\n");
893 return ATA_DEV_ATA;
894 }
895
633273a3 896 if ((tf->lbam == 0x14) && (tf->lbah == 0xeb)) {
1da177e4
LT
897 DPRINTK("found ATAPI device by sig\n");
898 return ATA_DEV_ATAPI;
899 }
900
633273a3
TH
901 if ((tf->lbam == 0x69) && (tf->lbah == 0x96)) {
902 DPRINTK("found PMP device by sig\n");
903 return ATA_DEV_PMP;
904 }
905
906 if ((tf->lbam == 0x3c) && (tf->lbah == 0xc3)) {
2dcb407e 907 printk(KERN_INFO "ata: SEMB device ignored\n");
633273a3
TH
908 return ATA_DEV_SEMB_UNSUP; /* not yet */
909 }
910
1da177e4
LT
911 DPRINTK("unknown device\n");
912 return ATA_DEV_UNKNOWN;
913}
914
/**
 *	ata_dev_try_classify - Parse returned ATA device signature
 *	@dev: ATA device to classify (starting at zero)
 *	@present: device seems present
 *	@r_err: Value of error register on completion
 *
 *	After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 *	an ATA/ATAPI-defined set of values is placed in the ATA
 *	shadow registers, indicating the results of device detection
 *	and diagnostics.
 *
 *	Select the ATA device, and read the values from the ATA shadow
 *	registers.  Then parse according to the Error register value,
 *	and the spec-defined values examined by ata_dev_classify().
 *
 *	LOCKING:
 *	caller.
 *
 *	RETURNS:
 *	Device type - %ATA_DEV_ATA, %ATA_DEV_ATAPI or %ATA_DEV_NONE.
 */
unsigned int ata_dev_try_classify(struct ata_device *dev, int present,
				  u8 *r_err)
{
	struct ata_port *ap = dev->link->ap;
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, dev->devno);

	memset(&tf, 0, sizeof(tf));

	/* after reset the error register holds the diagnostic result */
	ap->ops->tf_read(ap, &tf);
	err = tf.feature;
	if (r_err)
		*r_err = err;

	/* see if device passed diags: if master then continue and warn later */
	if (err == 0 && dev->devno == 0)
		/* diagnostic fail : do nothing _YET_ */
		dev->horkage |= ATA_HORKAGE_DIAGNOSTIC;
	else if (err == 1)
		/* do nothing */ ;
	else if ((dev->devno == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return ATA_DEV_NONE;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);

	if (class == ATA_DEV_UNKNOWN) {
		/* If the device failed diagnostic, it's likely to
		 * have reported incorrect device signature too.
		 * Assume ATA device if the device seems present but
		 * device signature is invalid with diagnostic
		 * failure.
		 */
		if (present && (dev->horkage & ATA_HORKAGE_DIAGNOSTIC))
			class = ATA_DEV_ATA;
		else
			class = ATA_DEV_NONE;
	} else if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		class = ATA_DEV_NONE;

	return class;
}
983
984/**
6a62a04d 985 * ata_id_string - Convert IDENTIFY DEVICE page into string
1da177e4
LT
986 * @id: IDENTIFY DEVICE results we will examine
987 * @s: string into which data is output
988 * @ofs: offset into identify device page
989 * @len: length of string to return. must be an even number.
990 *
991 * The strings in the IDENTIFY DEVICE page are broken up into
992 * 16-bit chunks. Run through the string, and output each
993 * 8-bit chunk linearly, regardless of platform.
994 *
995 * LOCKING:
996 * caller.
997 */
998
6a62a04d
TH
999void ata_id_string(const u16 *id, unsigned char *s,
1000 unsigned int ofs, unsigned int len)
1da177e4
LT
1001{
1002 unsigned int c;
1003
1004 while (len > 0) {
1005 c = id[ofs] >> 8;
1006 *s = c;
1007 s++;
1008
1009 c = id[ofs] & 0xff;
1010 *s = c;
1011 s++;
1012
1013 ofs++;
1014 len -= 2;
1015 }
1016}
1017
0e949ff3 1018/**
6a62a04d 1019 * ata_id_c_string - Convert IDENTIFY DEVICE page into C string
0e949ff3
TH
1020 * @id: IDENTIFY DEVICE results we will examine
1021 * @s: string into which data is output
1022 * @ofs: offset into identify device page
1023 * @len: length of string to return. must be an odd number.
1024 *
6a62a04d 1025 * This function is identical to ata_id_string except that it
0e949ff3
TH
1026 * trims trailing spaces and terminates the resulting string with
1027 * null. @len must be actual maximum length (even number) + 1.
1028 *
1029 * LOCKING:
1030 * caller.
1031 */
6a62a04d
TH
1032void ata_id_c_string(const u16 *id, unsigned char *s,
1033 unsigned int ofs, unsigned int len)
0e949ff3
TH
1034{
1035 unsigned char *p;
1036
1037 WARN_ON(!(len & 1));
1038
6a62a04d 1039 ata_id_string(id, s, ofs, len - 1);
0e949ff3
TH
1040
1041 p = s + strnlen(s, len - 1);
1042 while (p > s && p[-1] == ' ')
1043 p--;
1044 *p = '\0';
1045}
0baab86b 1046
db6f8759
TH
1047static u64 ata_id_n_sectors(const u16 *id)
1048{
1049 if (ata_id_has_lba(id)) {
1050 if (ata_id_has_lba48(id))
1051 return ata_id_u64(id, 100);
1052 else
1053 return ata_id_u32(id, 60);
1054 } else {
1055 if (ata_id_current_chs_valid(id))
1056 return ata_id_u32(id, 57);
1057 else
1058 return id[1] * id[3] * id[6];
1059 }
1060}
1061
1e999736
AC
1062static u64 ata_tf_to_lba48(struct ata_taskfile *tf)
1063{
1064 u64 sectors = 0;
1065
1066 sectors |= ((u64)(tf->hob_lbah & 0xff)) << 40;
1067 sectors |= ((u64)(tf->hob_lbam & 0xff)) << 32;
1068 sectors |= (tf->hob_lbal & 0xff) << 24;
1069 sectors |= (tf->lbah & 0xff) << 16;
1070 sectors |= (tf->lbam & 0xff) << 8;
1071 sectors |= (tf->lbal & 0xff);
1072
1073 return ++sectors;
1074}
1075
1076static u64 ata_tf_to_lba(struct ata_taskfile *tf)
1077{
1078 u64 sectors = 0;
1079
1080 sectors |= (tf->device & 0x0f) << 24;
1081 sectors |= (tf->lbah & 0xff) << 16;
1082 sectors |= (tf->lbam & 0xff) << 8;
1083 sectors |= (tf->lbal & 0xff);
1084
1085 return ++sectors;
1086}
1087
/**
 *	ata_read_native_max_address - Read native max address
 *	@dev: target device
 *	@max_sectors: out parameter for the result native max address
 *
 *	Perform an LBA48 or LBA28 native size query upon the device in
 *	question.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted by the drive.
 *	-EIO on other errors.
 */
static int ata_read_native_max_address(struct ata_device *dev, u64 *max_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	ata_tf_init(dev, &tf);

	/* always clear all address registers */
	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_READ_NATIVE_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;
	} else
		tf.command = ATA_CMD_READ_NATIVE_MAX;

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to read native "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* an explicit device abort means HPA is not supported/allowed */
		if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
			return -EACCES;
		return -EIO;
	}

	/* result comes back in the taskfile address registers */
	if (lba48)
		*max_sectors = ata_tf_to_lba48(&tf);
	else
		*max_sectors = ata_tf_to_lba(&tf);
	/* some drives report one sector too many; compensate */
	if (dev->horkage & ATA_HORKAGE_HPA_SIZE)
		(*max_sectors)--;
	return 0;
}
1137
/**
 *	ata_set_max_sectors - Set max sectors
 *	@dev: target device
 *	@new_sectors: new max sectors value to set for the device
 *
 *	Set max sectors of @dev to @new_sectors.
 *
 *	RETURNS:
 *	0 on success, -EACCES if command is aborted or denied (due to
 *	previous non-volatile SET_MAX) by the drive.  -EIO on other
 *	errors.
 */
static int ata_set_max_sectors(struct ata_device *dev, u64 new_sectors)
{
	unsigned int err_mask;
	struct ata_taskfile tf;
	int lba48 = ata_id_has_lba48(dev->id);

	/* SET MAX takes the max *address*, which is count - 1 */
	new_sectors--;

	ata_tf_init(dev, &tf);

	tf.flags |= ATA_TFLAG_DEVICE | ATA_TFLAG_ISADDR;

	if (lba48) {
		tf.command = ATA_CMD_SET_MAX_EXT;
		tf.flags |= ATA_TFLAG_LBA48;

		/* high-order bytes go in the HOB registers */
		tf.hob_lbal = (new_sectors >> 24) & 0xff;
		tf.hob_lbam = (new_sectors >> 32) & 0xff;
		tf.hob_lbah = (new_sectors >> 40) & 0xff;
	} else {
		tf.command = ATA_CMD_SET_MAX;

		/* LBA28: bits 24-27 in the device register */
		tf.device |= (new_sectors >> 24) & 0xf;
	}

	tf.protocol |= ATA_PROT_NODATA;
	tf.device |= ATA_LBA;

	tf.lbal = (new_sectors >> 0) & 0xff;
	tf.lbam = (new_sectors >> 8) & 0xff;
	tf.lbah = (new_sectors >> 16) & 0xff;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
	if (err_mask) {
		ata_dev_printk(dev, KERN_WARNING, "failed to set "
			       "max address (err_mask=0x%x)\n", err_mask);
		/* aborted or ID-not-found means the drive refused the resize */
		if (err_mask == AC_ERR_DEV &&
		    (tf.feature & (ATA_ABORTED | ATA_IDNF)))
			return -EACCES;
		return -EIO;
	}

	return 0;
}
1194
/**
 *	ata_hpa_resize - Resize a device with an HPA set
 *	@dev: Device to resize
 *
 *	Read the size of an LBA28 or LBA48 disk with HPA features and resize
 *	it if required to the full size of the media. The caller must check
 *	the drive has the HPA feature set enabled.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
static int ata_hpa_resize(struct ata_device *dev)
{
	struct ata_eh_context *ehc = &dev->link->eh_context;
	int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
	u64 sectors = ata_id_n_sectors(dev->id);
	u64 native_sectors;
	int rc;

	/* do we need to do it? */
	if (dev->class != ATA_DEV_ATA ||
	    !ata_id_has_lba(dev->id) || !ata_id_hpa_enabled(dev->id) ||
	    (dev->horkage & ATA_HORKAGE_BROKEN_HPA))
		return 0;

	/* read native max address */
	rc = ata_read_native_max_address(dev, &native_sectors);
	if (rc) {
		/* If HPA isn't going to be unlocked, skip HPA
		 * resizing from the next try.
		 */
		if (!ata_ignore_hpa) {
			ata_dev_printk(dev, KERN_WARNING, "HPA support seems "
				       "broken, will skip HPA handling\n");
			dev->horkage |= ATA_HORKAGE_BROKEN_HPA;

			/* we can continue if device aborted the command */
			if (rc == -EACCES)
				rc = 0;
		}

		return rc;
	}

	/* nothing to do?  only report the HPA state when probing */
	if (native_sectors <= sectors || !ata_ignore_hpa) {
		if (!print_info || native_sectors == sectors)
			return 0;

		if (native_sectors > sectors)
			ata_dev_printk(dev, KERN_INFO,
				"HPA detected: current %llu, native %llu\n",
				(unsigned long long)sectors,
				(unsigned long long)native_sectors);
		else if (native_sectors < sectors)
			ata_dev_printk(dev, KERN_WARNING,
				"native sectors (%llu) is smaller than "
				"sectors (%llu)\n",
				(unsigned long long)native_sectors,
				(unsigned long long)sectors);
		return 0;
	}

	/* let's unlock HPA */
	rc = ata_set_max_sectors(dev, native_sectors);
	if (rc == -EACCES) {
		/* if device aborted the command, skip HPA resizing */
		ata_dev_printk(dev, KERN_WARNING, "device aborted resize "
			       "(%llu -> %llu), skipping HPA handling\n",
			       (unsigned long long)sectors,
			       (unsigned long long)native_sectors);
		dev->horkage |= ATA_HORKAGE_BROKEN_HPA;
		return 0;
	} else if (rc)
		return rc;

	/* re-read IDENTIFY data as the reported size has changed */
	rc = ata_dev_reread_id(dev, 0);
	if (rc) {
		ata_dev_printk(dev, KERN_ERR, "failed to re-read IDENTIFY "
			       "data after HPA resizing\n");
		return rc;
	}

	if (print_info) {
		u64 new_sectors = ata_id_n_sectors(dev->id);
		ata_dev_printk(dev, KERN_INFO,
			"HPA unlocked: %llu -> %llu, native %llu\n",
			(unsigned long long)sectors,
			(unsigned long long)new_sectors,
			(unsigned long long)native_sectors);
	}

	return 0;
}
1290
0baab86b
EF
/**
 *	ata_noop_dev_select - Select device 0/1 on ATA bus
 *	@ap: ATA channel to manipulate
 *	@device: ATA device (numbered from zero) to select
 *
 *	This function performs no actual function.
 *
 *	May be used as the dev_select() entry in ata_port_operations
 *	by controllers that handle device selection in hardware.
 *
 *	LOCKING:
 *	caller.
 */
void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
	/* intentionally empty */
}
1306
0baab86b 1307
1da177e4
LT
1308/**
1309 * ata_std_dev_select - Select device 0/1 on ATA bus
1310 * @ap: ATA channel to manipulate
1311 * @device: ATA device (numbered from zero) to select
1312 *
1313 * Use the method defined in the ATA specification to
1314 * make either device 0, or device 1, active on the
0baab86b
EF
1315 * ATA channel. Works with both PIO and MMIO.
1316 *
1317 * May be used as the dev_select() entry in ata_port_operations.
1da177e4
LT
1318 *
1319 * LOCKING:
1320 * caller.
1321 */
1322
2dcb407e 1323void ata_std_dev_select(struct ata_port *ap, unsigned int device)
1da177e4
LT
1324{
1325 u8 tmp;
1326
1327 if (device == 0)
1328 tmp = ATA_DEVICE_OBS;
1329 else
1330 tmp = ATA_DEVICE_OBS | ATA_DEV1;
1331
0d5ff566 1332 iowrite8(tmp, ap->ioaddr.device_addr);
1da177e4
LT
1333 ata_pause(ap); /* needed; also flushes, for mmio */
1334}
1335
1336/**
1337 * ata_dev_select - Select device 0/1 on ATA bus
1338 * @ap: ATA channel to manipulate
1339 * @device: ATA device (numbered from zero) to select
1340 * @wait: non-zero to wait for Status register BSY bit to clear
1341 * @can_sleep: non-zero if context allows sleeping
1342 *
1343 * Use the method defined in the ATA specification to
1344 * make either device 0, or device 1, active on the
1345 * ATA channel.
1346 *
1347 * This is a high-level version of ata_std_dev_select(),
1348 * which additionally provides the services of inserting
1349 * the proper pauses and status polling, where needed.
1350 *
1351 * LOCKING:
1352 * caller.
1353 */
1354
1355void ata_dev_select(struct ata_port *ap, unsigned int device,
1356 unsigned int wait, unsigned int can_sleep)
1357{
88574551 1358 if (ata_msg_probe(ap))
44877b4e
TH
1359 ata_port_printk(ap, KERN_INFO, "ata_dev_select: ENTER, "
1360 "device %u, wait %u\n", device, wait);
1da177e4
LT
1361
1362 if (wait)
1363 ata_wait_idle(ap);
1364
1365 ap->ops->dev_select(ap, device);
1366
1367 if (wait) {
9af5c9c9 1368 if (can_sleep && ap->link.device[device].class == ATA_DEV_ATAPI)
1da177e4
LT
1369 msleep(150);
1370 ata_wait_idle(ap);
1371 }
1372}
1373
/**
 *	ata_dump_id - IDENTIFY DEVICE info debugging output
 *	@id: IDENTIFY DEVICE page to dump
 *
 *	Dump selected 16-bit words from the given IDENTIFY DEVICE
 *	page: capability, timing, and mode words useful when
 *	debugging transfer-mode selection.
 *
 *	LOCKING:
 *	caller.
 */

static inline void ata_dump_id(const u16 *id)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x  \n",
		id[49],
		id[53],
		id[63],
		id[64],
		id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x  \n",
		id[80],
		id[81],
		id[82],
		id[83],
		id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		id[88],
		id[93]);
}
1412
/**
 *	ata_id_xfermask - Compute xfermask from the given IDENTIFY data
 *	@id: IDENTIFY data to compute xfer mask from
 *
 *	Compute the xfermask for this device. This is not as trivial
 *	as it seems if we must consider early devices correctly.
 *
 *	FIXME: pre IDE drive timing (do we care ?).
 *
 *	LOCKING:
 *	None.
 *
 *	RETURNS:
 *	Computed xfermask
 */
unsigned long ata_id_xfermask(const u16 *id)
{
	unsigned long pio_mask, mwdma_mask, udma_mask;

	/* Usual case. Word 53 indicates word 64 is valid */
	if (id[ATA_ID_FIELD_VALID] & (1 << 1)) {
		pio_mask = id[ATA_ID_PIO_MODES] & 0x03;
		pio_mask <<= 3;
		pio_mask |= 0x7;	/* PIO0-2 are always supported */
	} else {
		/* If word 64 isn't valid then Word 51 high byte holds
		 * the PIO timing number for the maximum. Turn it into
		 * a mask.
		 */
		u8 mode = (id[ATA_ID_OLD_PIO_MODES] >> 8) & 0xFF;
		if (mode < 5)	/* Valid PIO range */
			pio_mask = (2 << mode) - 1;
		else
			pio_mask = 1;

		/* But wait.. there's more. Design your standards by
		 * committee and you too can get a free iordy field to
		 * process. However its the speeds not the modes that
		 * are supported... Note drivers using the timing API
		 * will get this right anyway
		 */
	}

	mwdma_mask = id[ATA_ID_MWDMA_MODES] & 0x07;

	if (ata_id_is_cfa(id)) {
		/*
		 * Process compact flash extended modes
		 * (word 163: advanced PIO/MWDMA modes)
		 */
		int pio = id[163] & 0x7;
		int dma = (id[163] >> 3) & 7;

		if (pio)
			pio_mask |= (1 << 5);
		if (pio > 1)
			pio_mask |= (1 << 6);
		if (dma)
			mwdma_mask |= (1 << 3);
		if (dma > 1)
			mwdma_mask |= (1 << 4);
	}

	/* UDMA modes (word 88) are only valid if word 53 bit 2 is set */
	udma_mask = 0;
	if (id[ATA_ID_FIELD_VALID] & (1 << 2))
		udma_mask = id[ATA_ID_UDMA_MODES] & 0xff;

	return ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
}
1481
/**
 *	ata_port_queue_task - Queue port_task
 *	@ap: The ata_port to queue port_task for
 *	@fn: workqueue function to be scheduled
 *	@data: data for @fn to use
 *	@delay: delay time for workqueue function
 *
 *	Schedule @fn(@data) for execution after @delay jiffies using
 *	port_task.  There is one port_task per port and it's the
 *	user(low level driver)'s responsibility to make sure that only
 *	one task is active at any given time.
 *
 *	libata core layer takes care of synchronization between
 *	port_task and EH.  ata_port_queue_task() may be ignored for EH
 *	synchronization.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_port_queue_task(struct ata_port *ap, work_func_t fn, void *data,
			 unsigned long delay)
{
	PREPARE_DELAYED_WORK(&ap->port_task, fn);
	ap->port_task_data = data;

	/* may fail if ata_port_flush_task() in progress */
	queue_delayed_work(ata_wq, &ap->port_task, delay);
}
1510
/**
 *	ata_port_flush_task - Flush port_task
 *	@ap: The ata_port to flush port_task for
 *
 *	After this function completes, port_task is guranteed not to
 *	be running or scheduled.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_port_flush_task(struct ata_port *ap)
{
	DPRINTK("ENTER\n");

	/* cancel pending work and wait for any running instance to finish */
	cancel_rearming_delayed_work(&ap->port_task);

	if (ata_msg_ctl(ap))
		ata_port_printk(ap, KERN_DEBUG, "%s: EXIT\n", __FUNCTION__);
}
1530
7102d230 1531static void ata_qc_complete_internal(struct ata_queued_cmd *qc)
a2a7a662 1532{
77853bf2 1533 struct completion *waiting = qc->private_data;
a2a7a662 1534
a2a7a662 1535 complete(waiting);
a2a7a662
TH
1536}
1537
/**
 *	ata_exec_internal_sg - execute libata internal command
 *	@dev: Device to which the command is sent
 *	@tf: Taskfile registers for the command and the result
 *	@cdb: CDB for packet command
 *	@dma_dir: Data tranfer direction of the command
 *	@sgl: sg list for the data buffer of the command
 *	@n_elem: Number of sg entries
 *	@timeout: Timeout in msecs (0 for default)
 *
 *	Executes libata internal command with timeout.  @tf contains
 *	command on entry and result on return.  Timeout and error
 *	conditions are reported via return value.  No recovery action
 *	is taken after a command times out.  It's caller's duty to
 *	clean up after timeout.
 *
 *	LOCKING:
 *	None.  Should be called with kernel context, might sleep.
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */
unsigned ata_exec_internal_sg(struct ata_device *dev,
			      struct ata_taskfile *tf, const u8 *cdb,
			      int dma_dir, struct scatterlist *sgl,
			      unsigned int n_elem, unsigned long timeout)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	u8 command = tf->command;
	struct ata_queued_cmd *qc;
	unsigned int tag, preempted_tag;
	u32 preempted_sactive, preempted_qc_active;
	int preempted_nr_active_links;
	DECLARE_COMPLETION_ONSTACK(wait);
	unsigned long flags;
	unsigned int err_mask;
	int rc;

	spin_lock_irqsave(ap->lock, flags);

	/* no internal command while frozen */
	if (ap->pflags & ATA_PFLAG_FROZEN) {
		spin_unlock_irqrestore(ap->lock, flags);
		return AC_ERR_SYSTEM;
	}

	/* initialize internal qc */

	/* XXX: Tag 0 is used for drivers with legacy EH as some
	 * drivers choke if any other tag is given.  This breaks
	 * ata_tag_internal() test for those drivers.  Don't use new
	 * EH stuff without converting to it.
	 */
	if (ap->ops->error_handler)
		tag = ATA_TAG_INTERNAL;
	else
		tag = 0;

	if (test_and_set_bit(tag, &ap->qc_allocated))
		BUG();
	qc = __ata_qc_from_tag(ap, tag);

	qc->tag = tag;
	qc->scsicmd = NULL;
	qc->ap = ap;
	qc->dev = dev;
	ata_qc_reinit(qc);

	/* save active-command state so it can be restored after the
	 * internal command finishes
	 */
	preempted_tag = link->active_tag;
	preempted_sactive = link->sactive;
	preempted_qc_active = ap->qc_active;
	preempted_nr_active_links = ap->nr_active_links;
	link->active_tag = ATA_TAG_POISON;
	link->sactive = 0;
	ap->qc_active = 0;
	ap->nr_active_links = 0;

	/* prepare & issue qc */
	qc->tf = *tf;
	if (cdb)
		memcpy(qc->cdb, cdb, ATAPI_CDB_LEN);
	qc->flags |= ATA_QCFLAG_RESULT_TF;
	qc->dma_dir = dma_dir;
	if (dma_dir != DMA_NONE) {
		unsigned int i, buflen = 0;
		struct scatterlist *sg;

		for_each_sg(sgl, sg, n_elem, i)
			buflen += sg->length;

		ata_sg_init(qc, sgl, n_elem);
		qc->nbytes = buflen;
	}

	qc->private_data = &wait;
	qc->complete_fn = ata_qc_complete_internal;

	ata_qc_issue(qc);

	spin_unlock_irqrestore(ap->lock, flags);

	if (!timeout)
		timeout = ata_probe_timeout * 1000 / HZ;

	rc = wait_for_completion_timeout(&wait, msecs_to_jiffies(timeout));

	ata_port_flush_task(ap);

	if (!rc) {
		spin_lock_irqsave(ap->lock, flags);

		/* We're racing with irq here.  If we lose, the
		 * following test prevents us from completing the qc
		 * twice.  If we win, the port is frozen and will be
		 * cleaned up by ->post_internal_cmd().
		 */
		if (qc->flags & ATA_QCFLAG_ACTIVE) {
			qc->err_mask |= AC_ERR_TIMEOUT;

			if (ap->ops->error_handler)
				ata_port_freeze(ap);
			else
				ata_qc_complete(qc);

			if (ata_msg_warn(ap))
				ata_dev_printk(dev, KERN_WARNING,
					       "qc timeout (cmd 0x%x)\n", command);
		}

		spin_unlock_irqrestore(ap->lock, flags);
	}

	/* do post_internal_cmd */
	if (ap->ops->post_internal_cmd)
		ap->ops->post_internal_cmd(qc);

	/* perform minimal error analysis */
	if (qc->flags & ATA_QCFLAG_FAILED) {
		if (qc->result_tf.command & (ATA_ERR | ATA_DF))
			qc->err_mask |= AC_ERR_DEV;

		if (!qc->err_mask)
			qc->err_mask |= AC_ERR_OTHER;

		/* drop the catch-all bit if a specific cause is known */
		if (qc->err_mask & ~AC_ERR_OTHER)
			qc->err_mask &= ~AC_ERR_OTHER;
	}

	/* finish up */
	spin_lock_irqsave(ap->lock, flags);

	*tf = qc->result_tf;
	err_mask = qc->err_mask;

	ata_qc_free(qc);
	link->active_tag = preempted_tag;
	link->sactive = preempted_sactive;
	ap->qc_active = preempted_qc_active;
	ap->nr_active_links = preempted_nr_active_links;

	/* XXX - Some LLDDs (sata_mv) disable port on command failure.
	 * Until those drivers are fixed, we detect the condition
	 * here, fail the command with AC_ERR_SYSTEM and reenable the
	 * port.
	 *
	 * Note that this doesn't change any behavior as internal
	 * command failure results in disabling the device in the
	 * higher layer for LLDDs without new reset/EH callbacks.
	 *
	 * Kill the following code as soon as those drivers are fixed.
	 */
	if (ap->flags & ATA_FLAG_DISABLED) {
		err_mask |= AC_ERR_SYSTEM;
		ata_port_probe(ap);
	}

	spin_unlock_irqrestore(ap->lock, flags);

	return err_mask;
}
1719
2432697b 1720/**
33480a0e 1721 * ata_exec_internal - execute libata internal command
2432697b
TH
1722 * @dev: Device to which the command is sent
1723 * @tf: Taskfile registers for the command and the result
1724 * @cdb: CDB for packet command
1725 * @dma_dir: Data tranfer direction of the command
1726 * @buf: Data buffer of the command
1727 * @buflen: Length of data buffer
2b789108 1728 * @timeout: Timeout in msecs (0 for default)
2432697b
TH
1729 *
1730 * Wrapper around ata_exec_internal_sg() which takes simple
1731 * buffer instead of sg list.
1732 *
1733 * LOCKING:
1734 * None. Should be called with kernel context, might sleep.
1735 *
1736 * RETURNS:
1737 * Zero on success, AC_ERR_* mask on failure
1738 */
1739unsigned ata_exec_internal(struct ata_device *dev,
1740 struct ata_taskfile *tf, const u8 *cdb,
2b789108
TH
1741 int dma_dir, void *buf, unsigned int buflen,
1742 unsigned long timeout)
2432697b 1743{
33480a0e
TH
1744 struct scatterlist *psg = NULL, sg;
1745 unsigned int n_elem = 0;
2432697b 1746
33480a0e
TH
1747 if (dma_dir != DMA_NONE) {
1748 WARN_ON(!buf);
1749 sg_init_one(&sg, buf, buflen);
1750 psg = &sg;
1751 n_elem++;
1752 }
2432697b 1753
2b789108
TH
1754 return ata_exec_internal_sg(dev, tf, cdb, dma_dir, psg, n_elem,
1755 timeout);
2432697b
TH
1756}
1757
977e6b9f
TH
1758/**
1759 * ata_do_simple_cmd - execute simple internal command
1760 * @dev: Device to which the command is sent
1761 * @cmd: Opcode to execute
1762 *
1763 * Execute a 'simple' command, that only consists of the opcode
1764 * 'cmd' itself, without filling any other registers
1765 *
1766 * LOCKING:
1767 * Kernel thread context (may sleep).
1768 *
1769 * RETURNS:
1770 * Zero on success, AC_ERR_* mask on failure
e58eb583 1771 */
77b08fb5 1772unsigned int ata_do_simple_cmd(struct ata_device *dev, u8 cmd)
e58eb583
TH
1773{
1774 struct ata_taskfile tf;
e58eb583
TH
1775
1776 ata_tf_init(dev, &tf);
1777
1778 tf.command = cmd;
1779 tf.flags |= ATA_TFLAG_DEVICE;
1780 tf.protocol = ATA_PROT_NODATA;
1781
2b789108 1782 return ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
e58eb583
TH
1783}
1784
1bc4ccff
AC
1785/**
1786 * ata_pio_need_iordy - check if iordy needed
1787 * @adev: ATA device
1788 *
1789 * Check if the current speed of the device requires IORDY. Used
1790 * by various controllers for chip configuration.
1791 */
a617c09f 1792
1bc4ccff
AC
1793unsigned int ata_pio_need_iordy(const struct ata_device *adev)
1794{
432729f0
AC
1795 /* Controller doesn't support IORDY. Probably a pointless check
1796 as the caller should know this */
9af5c9c9 1797 if (adev->link->ap->flags & ATA_FLAG_NO_IORDY)
1bc4ccff 1798 return 0;
432729f0
AC
1799 /* PIO3 and higher it is mandatory */
1800 if (adev->pio_mode > XFER_PIO_2)
1801 return 1;
1802 /* We turn it on when possible */
1803 if (ata_id_has_iordy(adev->id))
1bc4ccff 1804 return 1;
432729f0
AC
1805 return 0;
1806}
2e9edbf8 1807
432729f0
AC
1808/**
1809 * ata_pio_mask_no_iordy - Return the non IORDY mask
1810 * @adev: ATA device
1811 *
1812 * Compute the highest mode possible if we are not using iordy. Return
1813 * -1 if no iordy mode is available.
1814 */
a617c09f 1815
432729f0
AC
1816static u32 ata_pio_mask_no_iordy(const struct ata_device *adev)
1817{
1bc4ccff 1818 /* If we have no drive specific rule, then PIO 2 is non IORDY */
1bc4ccff 1819 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE */
432729f0 1820 u16 pio = adev->id[ATA_ID_EIDE_PIO];
1bc4ccff
AC
1821 /* Is the speed faster than the drive allows non IORDY ? */
1822 if (pio) {
1823 /* This is cycle times not frequency - watch the logic! */
1824 if (pio > 240) /* PIO2 is 240nS per cycle */
432729f0
AC
1825 return 3 << ATA_SHIFT_PIO;
1826 return 7 << ATA_SHIFT_PIO;
1bc4ccff
AC
1827 }
1828 }
432729f0 1829 return 3 << ATA_SHIFT_PIO;
1bc4ccff
AC
1830}
1831
/**
 *	ata_dev_read_id - Read ID data from the specified device
 *	@dev: target device
 *	@p_class: pointer to class of the target device (may be changed)
 *	@flags: ATA_READID_* flags
 *	@id: buffer to read IDENTIFY data into
 *
 *	Read ID data from the specified device.  ATA_CMD_ID_ATA is
 *	performed on ATA devices and ATA_CMD_ID_ATAPI on ATAPI
 *	devices.  This function also issues ATA_CMD_INIT_DEV_PARAMS
 *	for pre-ATA4 drives.
 *
 *	FIXME: ATA_CMD_ID_ATA is optional for early drives and right
 *	now we abort if we hit that case.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 *
 *	RETURNS:
 *	0 on success, -errno otherwise.
 */
int ata_dev_read_id(struct ata_device *dev, unsigned int *p_class,
		    unsigned int flags, u16 *id)
{
	struct ata_port *ap = dev->link->ap;
	unsigned int class = *p_class;
	struct ata_taskfile tf;
	unsigned int err_mask = 0;
	const char *reason;
	int may_fallback = 1, tried_spinup = 0;
	int rc;

	if (ata_msg_ctl(ap))
		ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);

	ata_dev_select(ap, dev->devno, 1, 1); /* select device 0/1 */
 retry:
	ata_tf_init(dev, &tf);

	/* pick the IDENTIFY variant matching the (assumed) class */
	switch (class) {
	case ATA_DEV_ATA:
		tf.command = ATA_CMD_ID_ATA;
		break;
	case ATA_DEV_ATAPI:
		tf.command = ATA_CMD_ID_ATAPI;
		break;
	default:
		rc = -ENODEV;
		reason = "unsupported class";
		goto err_out;
	}

	tf.protocol = ATA_PROT_PIO;

	/* Some devices choke if TF registers contain garbage.  Make
	 * sure those are properly initialized.
	 */
	tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;

	/* Device presence detection is unreliable on some
	 * controllers.  Always poll IDENTIFY if available.
	 */
	tf.flags |= ATA_TFLAG_POLLING;

	err_mask = ata_exec_internal(dev, &tf, NULL, DMA_FROM_DEVICE,
				     id, sizeof(id[0]) * ATA_ID_WORDS, 0);
	if (err_mask) {
		if (err_mask & AC_ERR_NODEV_HINT) {
			DPRINTK("ata%u.%d: NODEV after polling detection\n",
				ap->print_id, dev->devno);
			return -ENOENT;
		}

		/* Device or controller might have reported the wrong
		 * device class.  Give a shot at the other IDENTIFY if
		 * the current one is aborted by the device.
		 */
		if (may_fallback &&
		    (err_mask == AC_ERR_DEV) && (tf.feature & ATA_ABORTED)) {
			may_fallback = 0;

			if (class == ATA_DEV_ATA)
				class = ATA_DEV_ATAPI;
			else
				class = ATA_DEV_ATA;
			goto retry;
		}

		rc = -EIO;
		reason = "I/O error";
		goto err_out;
	}

	/* Falling back doesn't make sense if ID data was read
	 * successfully at least once.
	 */
	may_fallback = 0;

	swap_buf_le16(id, ATA_ID_WORDS);

	/* sanity check: the ID data must agree with the class we used */
	rc = -EINVAL;
	reason = "device reports invalid type";

	if (class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(id) && !ata_id_is_cfa(id))
			goto err_out;
	} else {
		if (ata_id_is_ata(id))
			goto err_out;
	}

	if (!tried_spinup && (id[2] == 0x37c8 || id[2] == 0x738c)) {
		tried_spinup = 1;
		/*
		 * Drive powered-up in standby mode, and requires a specific
		 * SET_FEATURES spin-up subcommand before it will accept
		 * anything other than the original IDENTIFY command.
		 */
		err_mask = ata_dev_set_feature(dev, SETFEATURES_SPINUP, 0);
		if (err_mask && id[2] != 0x738c) {
			rc = -EIO;
			reason = "SPINUP failed";
			goto err_out;
		}
		/*
		 * If the drive initially returned incomplete IDENTIFY info,
		 * we now must reissue the IDENTIFY command.
		 */
		if (id[2] == 0x37c8)
			goto retry;
	}

	if ((flags & ATA_READID_POSTRESET) && class == ATA_DEV_ATA) {
		/*
		 * The exact sequence expected by certain pre-ATA4 drives is:
		 * SRST RESET
		 * IDENTIFY (optional in early ATA)
		 * INITIALIZE DEVICE PARAMETERS (later IDE and ATA)
		 * anything else..
		 * Some drives were very specific about that exact sequence.
		 *
		 * Note that ATA4 says lba is mandatory so the second check
		 * shoud never trigger.
		 */
		if (ata_id_major_version(id) < 4 || !ata_id_has_lba(id)) {
			err_mask = ata_dev_init_params(dev, id[3], id[6]);
			if (err_mask) {
				rc = -EIO;
				reason = "INIT_DEV_PARAMS failed";
				goto err_out;
			}

			/* current CHS translation info (id[53-58]) might be
			 * changed.  reread the identify device info.
			 */
			flags &= ~ATA_READID_POSTRESET;
			goto retry;
		}
	}

	*p_class = class;

	return 0;

 err_out:
	if (ata_msg_warn(ap))
		ata_dev_printk(dev, KERN_WARNING, "failed to IDENTIFY "
			       "(%s, err_mask=0x%x)\n", reason, err_mask);
	return rc;
}
2003
3373efd8 2004static inline u8 ata_dev_knobble(struct ata_device *dev)
4b2f3ede 2005{
9af5c9c9
TH
2006 struct ata_port *ap = dev->link->ap;
2007 return ((ap->cbl == ATA_CBL_SATA) && (!ata_id_is_sata(dev->id)));
4b2f3ede
TH
2008}
2009
/* Configure NCQ for @dev and format a human-readable description of
 * the resulting state into @desc (at most @desc_sz bytes).  Sets
 * ATA_DFLAG_NCQ when both device and host support queueing.
 */
static void ata_dev_config_ncq(struct ata_device *dev,
			       char *desc, size_t desc_sz)
{
	struct ata_port *ap = dev->link->ap;
	/* hdepth: host queue depth, ddepth: device queue depth */
	int hdepth = 0, ddepth = ata_id_queue_depth(dev->id);

	if (!ata_id_has_ncq(dev->id)) {
		desc[0] = '\0';
		return;
	}
	/* blacklisted device: supported but deliberately not used */
	if (dev->horkage & ATA_HORKAGE_NONCQ) {
		snprintf(desc, desc_sz, "NCQ (not used)");
		return;
	}
	if (ap->flags & ATA_FLAG_NCQ) {
		hdepth = min(ap->scsi_host->can_queue, ATA_MAX_QUEUE - 1);
		dev->flags |= ATA_DFLAG_NCQ;
	}

	/* report effective depth; show both when the host limits it */
	if (hdepth >= ddepth)
		snprintf(desc, desc_sz, "NCQ (depth %d)", ddepth);
	else
		snprintf(desc, desc_sz, "NCQ (depth %d/%d)", hdepth, ddepth);
}
2034
49016aca 2035/**
ffeae418 2036 * ata_dev_configure - Configure the specified ATA/ATAPI device
ffeae418
TH
2037 * @dev: Target device to configure
2038 *
2039 * Configure @dev according to @dev->id. Generic and low-level
2040 * driver specific fixups are also applied.
49016aca
TH
2041 *
2042 * LOCKING:
ffeae418
TH
2043 * Kernel thread context (may sleep)
2044 *
2045 * RETURNS:
2046 * 0 on success, -errno otherwise
49016aca 2047 */
efdaedc4 2048int ata_dev_configure(struct ata_device *dev)
49016aca 2049{
9af5c9c9
TH
2050 struct ata_port *ap = dev->link->ap;
2051 struct ata_eh_context *ehc = &dev->link->eh_context;
6746544c 2052 int print_info = ehc->i.flags & ATA_EHI_PRINTINFO;
1148c3a7 2053 const u16 *id = dev->id;
7dc951ae 2054 unsigned long xfer_mask;
b352e57d 2055 char revbuf[7]; /* XYZ-99\0 */
3f64f565
EM
2056 char fwrevbuf[ATA_ID_FW_REV_LEN+1];
2057 char modelbuf[ATA_ID_PROD_LEN+1];
e6d902a3 2058 int rc;
49016aca 2059
0dd4b21f 2060 if (!ata_dev_enabled(dev) && ata_msg_info(ap)) {
44877b4e
TH
2061 ata_dev_printk(dev, KERN_INFO, "%s: ENTER/EXIT -- nodev\n",
2062 __FUNCTION__);
ffeae418 2063 return 0;
49016aca
TH
2064 }
2065
0dd4b21f 2066 if (ata_msg_probe(ap))
44877b4e 2067 ata_dev_printk(dev, KERN_DEBUG, "%s: ENTER\n", __FUNCTION__);
1da177e4 2068
75683fe7
TH
2069 /* set horkage */
2070 dev->horkage |= ata_dev_blacklisted(dev);
2071
6746544c
TH
2072 /* let ACPI work its magic */
2073 rc = ata_acpi_on_devcfg(dev);
2074 if (rc)
2075 return rc;
08573a86 2076
05027adc
TH
2077 /* massage HPA, do it early as it might change IDENTIFY data */
2078 rc = ata_hpa_resize(dev);
2079 if (rc)
2080 return rc;
2081
c39f5ebe 2082 /* print device capabilities */
0dd4b21f 2083 if (ata_msg_probe(ap))
88574551
TH
2084 ata_dev_printk(dev, KERN_DEBUG,
2085 "%s: cfg 49:%04x 82:%04x 83:%04x 84:%04x "
2086 "85:%04x 86:%04x 87:%04x 88:%04x\n",
0dd4b21f 2087 __FUNCTION__,
f15a1daf
TH
2088 id[49], id[82], id[83], id[84],
2089 id[85], id[86], id[87], id[88]);
c39f5ebe 2090
208a9933 2091 /* initialize to-be-configured parameters */
ea1dd4e1 2092 dev->flags &= ~ATA_DFLAG_CFG_MASK;
208a9933
TH
2093 dev->max_sectors = 0;
2094 dev->cdb_len = 0;
2095 dev->n_sectors = 0;
2096 dev->cylinders = 0;
2097 dev->heads = 0;
2098 dev->sectors = 0;
2099
1da177e4
LT
2100 /*
2101 * common ATA, ATAPI feature tests
2102 */
2103
ff8854b2 2104 /* find max transfer mode; for printk only */
1148c3a7 2105 xfer_mask = ata_id_xfermask(id);
1da177e4 2106
0dd4b21f
BP
2107 if (ata_msg_probe(ap))
2108 ata_dump_id(id);
1da177e4 2109
ef143d57
AL
2110 /* SCSI only uses 4-char revisions, dump full 8 chars from ATA */
2111 ata_id_c_string(dev->id, fwrevbuf, ATA_ID_FW_REV,
2112 sizeof(fwrevbuf));
2113
2114 ata_id_c_string(dev->id, modelbuf, ATA_ID_PROD,
2115 sizeof(modelbuf));
2116
1da177e4
LT
2117 /* ATA-specific feature tests */
2118 if (dev->class == ATA_DEV_ATA) {
b352e57d
AC
2119 if (ata_id_is_cfa(id)) {
2120 if (id[162] & 1) /* CPRM may make this media unusable */
44877b4e
TH
2121 ata_dev_printk(dev, KERN_WARNING,
2122 "supports DRM functions and may "
2123 "not be fully accessable.\n");
b352e57d 2124 snprintf(revbuf, 7, "CFA");
ae8d4ee7 2125 } else {
2dcb407e 2126 snprintf(revbuf, 7, "ATA-%d", ata_id_major_version(id));
ae8d4ee7
AC
2127 /* Warn the user if the device has TPM extensions */
2128 if (ata_id_has_tpm(id))
2129 ata_dev_printk(dev, KERN_WARNING,
2130 "supports DRM functions and may "
2131 "not be fully accessable.\n");
2132 }
b352e57d 2133
1148c3a7 2134 dev->n_sectors = ata_id_n_sectors(id);
2940740b 2135
3f64f565
EM
2136 if (dev->id[59] & 0x100)
2137 dev->multi_count = dev->id[59] & 0xff;
2138
1148c3a7 2139 if (ata_id_has_lba(id)) {
4c2d721a 2140 const char *lba_desc;
a6e6ce8e 2141 char ncq_desc[20];
8bf62ece 2142
4c2d721a
TH
2143 lba_desc = "LBA";
2144 dev->flags |= ATA_DFLAG_LBA;
1148c3a7 2145 if (ata_id_has_lba48(id)) {
8bf62ece 2146 dev->flags |= ATA_DFLAG_LBA48;
4c2d721a 2147 lba_desc = "LBA48";
6fc49adb
TH
2148
2149 if (dev->n_sectors >= (1UL << 28) &&
2150 ata_id_has_flush_ext(id))
2151 dev->flags |= ATA_DFLAG_FLUSH_EXT;
4c2d721a 2152 }
8bf62ece 2153
a6e6ce8e
TH
2154 /* config NCQ */
2155 ata_dev_config_ncq(dev, ncq_desc, sizeof(ncq_desc));
2156
8bf62ece 2157 /* print device info to dmesg */
3f64f565
EM
2158 if (ata_msg_drv(ap) && print_info) {
2159 ata_dev_printk(dev, KERN_INFO,
2160 "%s: %s, %s, max %s\n",
2161 revbuf, modelbuf, fwrevbuf,
2162 ata_mode_string(xfer_mask));
2163 ata_dev_printk(dev, KERN_INFO,
2164 "%Lu sectors, multi %u: %s %s\n",
f15a1daf 2165 (unsigned long long)dev->n_sectors,
3f64f565
EM
2166 dev->multi_count, lba_desc, ncq_desc);
2167 }
ffeae418 2168 } else {
8bf62ece
AL
2169 /* CHS */
2170
2171 /* Default translation */
1148c3a7
TH
2172 dev->cylinders = id[1];
2173 dev->heads = id[3];
2174 dev->sectors = id[6];
8bf62ece 2175
1148c3a7 2176 if (ata_id_current_chs_valid(id)) {
8bf62ece 2177 /* Current CHS translation is valid. */
1148c3a7
TH
2178 dev->cylinders = id[54];
2179 dev->heads = id[55];
2180 dev->sectors = id[56];
8bf62ece
AL
2181 }
2182
2183 /* print device info to dmesg */
3f64f565 2184 if (ata_msg_drv(ap) && print_info) {
88574551 2185 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2186 "%s: %s, %s, max %s\n",
2187 revbuf, modelbuf, fwrevbuf,
2188 ata_mode_string(xfer_mask));
a84471fe 2189 ata_dev_printk(dev, KERN_INFO,
3f64f565
EM
2190 "%Lu sectors, multi %u, CHS %u/%u/%u\n",
2191 (unsigned long long)dev->n_sectors,
2192 dev->multi_count, dev->cylinders,
2193 dev->heads, dev->sectors);
2194 }
07f6f7d0
AL
2195 }
2196
6e7846e9 2197 dev->cdb_len = 16;
1da177e4
LT
2198 }
2199
2200 /* ATAPI-specific feature tests */
2c13b7ce 2201 else if (dev->class == ATA_DEV_ATAPI) {
854c73a2
TH
2202 const char *cdb_intr_string = "";
2203 const char *atapi_an_string = "";
7d77b247 2204 u32 sntf;
08a556db 2205
1148c3a7 2206 rc = atapi_cdb_len(id);
1da177e4 2207 if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
0dd4b21f 2208 if (ata_msg_warn(ap))
88574551
TH
2209 ata_dev_printk(dev, KERN_WARNING,
2210 "unsupported CDB len\n");
ffeae418 2211 rc = -EINVAL;
1da177e4
LT
2212 goto err_out_nosup;
2213 }
6e7846e9 2214 dev->cdb_len = (unsigned int) rc;
1da177e4 2215
7d77b247
TH
2216 /* Enable ATAPI AN if both the host and device have
2217 * the support. If PMP is attached, SNTF is required
2218 * to enable ATAPI AN to discern between PHY status
2219 * changed notifications and ATAPI ANs.
9f45cbd3 2220 */
7d77b247
TH
2221 if ((ap->flags & ATA_FLAG_AN) && ata_id_has_atapi_AN(id) &&
2222 (!ap->nr_pmp_links ||
2223 sata_scr_read(&ap->link, SCR_NOTIFICATION, &sntf) == 0)) {
854c73a2
TH
2224 unsigned int err_mask;
2225
9f45cbd3 2226 /* issue SET feature command to turn this on */
218f3d30
JG
2227 err_mask = ata_dev_set_feature(dev,
2228 SETFEATURES_SATA_ENABLE, SATA_AN);
854c73a2 2229 if (err_mask)
9f45cbd3 2230 ata_dev_printk(dev, KERN_ERR,
854c73a2
TH
2231 "failed to enable ATAPI AN "
2232 "(err_mask=0x%x)\n", err_mask);
2233 else {
9f45cbd3 2234 dev->flags |= ATA_DFLAG_AN;
854c73a2
TH
2235 atapi_an_string = ", ATAPI AN";
2236 }
9f45cbd3
KCA
2237 }
2238
08a556db 2239 if (ata_id_cdb_intr(dev->id)) {
312f7da2 2240 dev->flags |= ATA_DFLAG_CDB_INTR;
08a556db
AL
2241 cdb_intr_string = ", CDB intr";
2242 }
312f7da2 2243
1da177e4 2244 /* print device info to dmesg */
5afc8142 2245 if (ata_msg_drv(ap) && print_info)
ef143d57 2246 ata_dev_printk(dev, KERN_INFO,
854c73a2 2247 "ATAPI: %s, %s, max %s%s%s\n",
ef143d57 2248 modelbuf, fwrevbuf,
12436c30 2249 ata_mode_string(xfer_mask),
854c73a2 2250 cdb_intr_string, atapi_an_string);
1da177e4
LT
2251 }
2252
914ed354
TH
2253 /* determine max_sectors */
2254 dev->max_sectors = ATA_MAX_SECTORS;
2255 if (dev->flags & ATA_DFLAG_LBA48)
2256 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2257
ca77329f
KCA
2258 if (!(dev->horkage & ATA_HORKAGE_IPM)) {
2259 if (ata_id_has_hipm(dev->id))
2260 dev->flags |= ATA_DFLAG_HIPM;
2261 if (ata_id_has_dipm(dev->id))
2262 dev->flags |= ATA_DFLAG_DIPM;
2263 }
2264
93590859
AC
2265 if (dev->horkage & ATA_HORKAGE_DIAGNOSTIC) {
2266 /* Let the user know. We don't want to disallow opens for
2267 rescue purposes, or in case the vendor is just a blithering
2268 idiot */
2dcb407e 2269 if (print_info) {
93590859
AC
2270 ata_dev_printk(dev, KERN_WARNING,
2271"Drive reports diagnostics failure. This may indicate a drive\n");
2272 ata_dev_printk(dev, KERN_WARNING,
2273"fault or invalid emulation. Contact drive vendor for information.\n");
2274 }
2275 }
2276
4b2f3ede 2277 /* limit bridge transfers to udma5, 200 sectors */
3373efd8 2278 if (ata_dev_knobble(dev)) {
5afc8142 2279 if (ata_msg_drv(ap) && print_info)
f15a1daf
TH
2280 ata_dev_printk(dev, KERN_INFO,
2281 "applying bridge limits\n");
5a529139 2282 dev->udma_mask &= ATA_UDMA5;
4b2f3ede
TH
2283 dev->max_sectors = ATA_MAX_SECTORS;
2284 }
2285
f8d8e579 2286 if ((dev->class == ATA_DEV_ATAPI) &&
f442cd86 2287 (atapi_command_packet_set(id) == TYPE_TAPE)) {
f8d8e579 2288 dev->max_sectors = ATA_MAX_SECTORS_TAPE;
f442cd86
AL
2289 dev->horkage |= ATA_HORKAGE_STUCK_ERR;
2290 }
f8d8e579 2291
75683fe7 2292 if (dev->horkage & ATA_HORKAGE_MAX_SEC_128)
03ec52de
TH
2293 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2294 dev->max_sectors);
18d6e9d5 2295
ca77329f
KCA
2296 if (ata_dev_blacklisted(dev) & ATA_HORKAGE_IPM) {
2297 dev->horkage |= ATA_HORKAGE_IPM;
2298
2299 /* reset link pm_policy for this port to no pm */
2300 ap->pm_policy = MAX_PERFORMANCE;
2301 }
2302
4b2f3ede 2303 if (ap->ops->dev_config)
cd0d3bbc 2304 ap->ops->dev_config(dev);
4b2f3ede 2305
0dd4b21f
BP
2306 if (ata_msg_probe(ap))
2307 ata_dev_printk(dev, KERN_DEBUG, "%s: EXIT, drv_stat = 0x%x\n",
2308 __FUNCTION__, ata_chk_status(ap));
ffeae418 2309 return 0;
1da177e4
LT
2310
2311err_out_nosup:
0dd4b21f 2312 if (ata_msg_probe(ap))
88574551
TH
2313 ata_dev_printk(dev, KERN_DEBUG,
2314 "%s: EXIT, err\n", __FUNCTION__);
ffeae418 2315 return rc;
1da177e4
LT
2316}
2317
be0d18df 2318/**
2e41e8e6 2319 * ata_cable_40wire - return 40 wire cable type
be0d18df
AC
2320 * @ap: port
2321 *
2e41e8e6 2322 * Helper method for drivers which want to hardwire 40 wire cable
be0d18df
AC
2323 * detection.
2324 */
2325
2326int ata_cable_40wire(struct ata_port *ap)
2327{
2328 return ATA_CBL_PATA40;
2329}
2330
2331/**
2e41e8e6 2332 * ata_cable_80wire - return 80 wire cable type
be0d18df
AC
2333 * @ap: port
2334 *
2e41e8e6 2335 * Helper method for drivers which want to hardwire 80 wire cable
be0d18df
AC
2336 * detection.
2337 */
2338
2339int ata_cable_80wire(struct ata_port *ap)
2340{
2341 return ATA_CBL_PATA80;
2342}
2343
2344/**
2345 * ata_cable_unknown - return unknown PATA cable.
2346 * @ap: port
2347 *
2348 * Helper method for drivers which have no PATA cable detection.
2349 */
2350
2351int ata_cable_unknown(struct ata_port *ap)
2352{
2353 return ATA_CBL_PATA_UNK;
2354}
2355
2356/**
2357 * ata_cable_sata - return SATA cable type
2358 * @ap: port
2359 *
2360 * Helper method for drivers which have SATA cables
2361 */
2362
2363int ata_cable_sata(struct ata_port *ap)
2364{
2365 return ATA_CBL_SATA;
2366}
2367
1da177e4
LT
2368/**
2369 * ata_bus_probe - Reset and probe ATA bus
2370 * @ap: Bus to probe
2371 *
0cba632b
JG
2372 * Master ATA bus probing function. Initiates a hardware-dependent
2373 * bus reset, then attempts to identify any devices found on
2374 * the bus.
2375 *
1da177e4 2376 * LOCKING:
0cba632b 2377 * PCI/etc. bus probe sem.
1da177e4
LT
2378 *
2379 * RETURNS:
96072e69 2380 * Zero on success, negative errno otherwise.
1da177e4
LT
2381 */
2382
80289167 2383int ata_bus_probe(struct ata_port *ap)
1da177e4 2384{
28ca5c57 2385 unsigned int classes[ATA_MAX_DEVICES];
14d2bac1 2386 int tries[ATA_MAX_DEVICES];
f58229f8 2387 int rc;
e82cbdb9 2388 struct ata_device *dev;
1da177e4 2389
28ca5c57 2390 ata_port_probe(ap);
c19ba8af 2391
f58229f8
TH
2392 ata_link_for_each_dev(dev, &ap->link)
2393 tries[dev->devno] = ATA_PROBE_MAX_TRIES;
14d2bac1
TH
2394
2395 retry:
cdeab114
TH
2396 ata_link_for_each_dev(dev, &ap->link) {
2397 /* If we issue an SRST then an ATA drive (not ATAPI)
2398 * may change configuration and be in PIO0 timing. If
2399 * we do a hard reset (or are coming from power on)
2400 * this is true for ATA or ATAPI. Until we've set a
2401 * suitable controller mode we should not touch the
2402 * bus as we may be talking too fast.
2403 */
2404 dev->pio_mode = XFER_PIO_0;
2405
2406 /* If the controller has a pio mode setup function
2407 * then use it to set the chipset to rights. Don't
2408 * touch the DMA setup as that will be dealt with when
2409 * configuring devices.
2410 */
2411 if (ap->ops->set_piomode)
2412 ap->ops->set_piomode(ap, dev);
2413 }
2414
2044470c 2415 /* reset and determine device classes */
52783c5d 2416 ap->ops->phy_reset(ap);
2061a47a 2417
f58229f8 2418 ata_link_for_each_dev(dev, &ap->link) {
52783c5d
TH
2419 if (!(ap->flags & ATA_FLAG_DISABLED) &&
2420 dev->class != ATA_DEV_UNKNOWN)
2421 classes[dev->devno] = dev->class;
2422 else
2423 classes[dev->devno] = ATA_DEV_NONE;
2044470c 2424
52783c5d 2425 dev->class = ATA_DEV_UNKNOWN;
28ca5c57 2426 }
1da177e4 2427
52783c5d 2428 ata_port_probe(ap);
2044470c 2429
f31f0cc2
JG
2430 /* read IDENTIFY page and configure devices. We have to do the identify
2431 specific sequence bass-ackwards so that PDIAG- is released by
2432 the slave device */
2433
f58229f8
TH
2434 ata_link_for_each_dev(dev, &ap->link) {
2435 if (tries[dev->devno])
2436 dev->class = classes[dev->devno];
ffeae418 2437
14d2bac1 2438 if (!ata_dev_enabled(dev))
ffeae418 2439 continue;
ffeae418 2440
bff04647
TH
2441 rc = ata_dev_read_id(dev, &dev->class, ATA_READID_POSTRESET,
2442 dev->id);
14d2bac1
TH
2443 if (rc)
2444 goto fail;
f31f0cc2
JG
2445 }
2446
be0d18df
AC
2447 /* Now ask for the cable type as PDIAG- should have been released */
2448 if (ap->ops->cable_detect)
2449 ap->cbl = ap->ops->cable_detect(ap);
2450
614fe29b
AC
2451 /* We may have SATA bridge glue hiding here irrespective of the
2452 reported cable types and sensed types */
2453 ata_link_for_each_dev(dev, &ap->link) {
2454 if (!ata_dev_enabled(dev))
2455 continue;
2456 /* SATA drives indicate we have a bridge. We don't know which
2457 end of the link the bridge is which is a problem */
2458 if (ata_id_is_sata(dev->id))
2459 ap->cbl = ATA_CBL_SATA;
2460 }
2461
f31f0cc2
JG
2462 /* After the identify sequence we can now set up the devices. We do
2463 this in the normal order so that the user doesn't get confused */
2464
f58229f8 2465 ata_link_for_each_dev(dev, &ap->link) {
f31f0cc2
JG
2466 if (!ata_dev_enabled(dev))
2467 continue;
14d2bac1 2468
9af5c9c9 2469 ap->link.eh_context.i.flags |= ATA_EHI_PRINTINFO;
efdaedc4 2470 rc = ata_dev_configure(dev);
9af5c9c9 2471 ap->link.eh_context.i.flags &= ~ATA_EHI_PRINTINFO;
14d2bac1
TH
2472 if (rc)
2473 goto fail;
1da177e4
LT
2474 }
2475
e82cbdb9 2476 /* configure transfer mode */
0260731f 2477 rc = ata_set_mode(&ap->link, &dev);
4ae72a1e 2478 if (rc)
51713d35 2479 goto fail;
1da177e4 2480
f58229f8
TH
2481 ata_link_for_each_dev(dev, &ap->link)
2482 if (ata_dev_enabled(dev))
e82cbdb9 2483 return 0;
1da177e4 2484
e82cbdb9
TH
2485 /* no device present, disable port */
2486 ata_port_disable(ap);
96072e69 2487 return -ENODEV;
14d2bac1
TH
2488
2489 fail:
4ae72a1e
TH
2490 tries[dev->devno]--;
2491
14d2bac1
TH
2492 switch (rc) {
2493 case -EINVAL:
4ae72a1e 2494 /* eeek, something went very wrong, give up */
14d2bac1
TH
2495 tries[dev->devno] = 0;
2496 break;
4ae72a1e
TH
2497
2498 case -ENODEV:
2499 /* give it just one more chance */
2500 tries[dev->devno] = min(tries[dev->devno], 1);
14d2bac1 2501 case -EIO:
4ae72a1e
TH
2502 if (tries[dev->devno] == 1) {
2503 /* This is the last chance, better to slow
2504 * down than lose it.
2505 */
936fd732 2506 sata_down_spd_limit(&ap->link);
4ae72a1e
TH
2507 ata_down_xfermask_limit(dev, ATA_DNXFER_PIO);
2508 }
14d2bac1
TH
2509 }
2510
4ae72a1e 2511 if (!tries[dev->devno])
3373efd8 2512 ata_dev_disable(dev);
ec573755 2513
14d2bac1 2514 goto retry;
1da177e4
LT
2515}
2516
2517/**
0cba632b
JG
2518 * ata_port_probe - Mark port as enabled
2519 * @ap: Port for which we indicate enablement
1da177e4 2520 *
0cba632b
JG
2521 * Modify @ap data structure such that the system
2522 * thinks that the entire port is enabled.
2523 *
cca3974e 2524 * LOCKING: host lock, or some other form of
0cba632b 2525 * serialization.
1da177e4
LT
2526 */
2527
2528void ata_port_probe(struct ata_port *ap)
2529{
198e0fed 2530 ap->flags &= ~ATA_FLAG_DISABLED;
1da177e4
LT
2531}
2532
3be680b7
TH
2533/**
2534 * sata_print_link_status - Print SATA link status
936fd732 2535 * @link: SATA link to printk link status about
3be680b7
TH
2536 *
2537 * This function prints link speed and status of a SATA link.
2538 *
2539 * LOCKING:
2540 * None.
2541 */
936fd732 2542void sata_print_link_status(struct ata_link *link)
3be680b7 2543{
6d5f9732 2544 u32 sstatus, scontrol, tmp;
3be680b7 2545
936fd732 2546 if (sata_scr_read(link, SCR_STATUS, &sstatus))
3be680b7 2547 return;
936fd732 2548 sata_scr_read(link, SCR_CONTROL, &scontrol);
3be680b7 2549
936fd732 2550 if (ata_link_online(link)) {
3be680b7 2551 tmp = (sstatus >> 4) & 0xf;
936fd732 2552 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2553 "SATA link up %s (SStatus %X SControl %X)\n",
2554 sata_spd_string(tmp), sstatus, scontrol);
3be680b7 2555 } else {
936fd732 2556 ata_link_printk(link, KERN_INFO,
f15a1daf
TH
2557 "SATA link down (SStatus %X SControl %X)\n",
2558 sstatus, scontrol);
3be680b7
TH
2559 }
2560}
2561
ebdfca6e
AC
2562/**
2563 * ata_dev_pair - return other device on cable
ebdfca6e
AC
2564 * @adev: device
2565 *
2566 * Obtain the other device on the same cable, or if none is
2567 * present NULL is returned
2568 */
2e9edbf8 2569
3373efd8 2570struct ata_device *ata_dev_pair(struct ata_device *adev)
ebdfca6e 2571{
9af5c9c9
TH
2572 struct ata_link *link = adev->link;
2573 struct ata_device *pair = &link->device[1 - adev->devno];
e1211e3f 2574 if (!ata_dev_enabled(pair))
ebdfca6e
AC
2575 return NULL;
2576 return pair;
2577}
2578
1da177e4 2579/**
780a87f7
JG
2580 * ata_port_disable - Disable port.
2581 * @ap: Port to be disabled.
1da177e4 2582 *
780a87f7
JG
2583 * Modify @ap data structure such that the system
2584 * thinks that the entire port is disabled, and should
2585 * never attempt to probe or communicate with devices
2586 * on this port.
2587 *
cca3974e 2588 * LOCKING: host lock, or some other form of
780a87f7 2589 * serialization.
1da177e4
LT
2590 */
2591
2592void ata_port_disable(struct ata_port *ap)
2593{
9af5c9c9
TH
2594 ap->link.device[0].class = ATA_DEV_NONE;
2595 ap->link.device[1].class = ATA_DEV_NONE;
198e0fed 2596 ap->flags |= ATA_FLAG_DISABLED;
1da177e4
LT
2597}
2598
1c3fae4d 2599/**
3c567b7d 2600 * sata_down_spd_limit - adjust SATA spd limit downward
936fd732 2601 * @link: Link to adjust SATA spd limit for
1c3fae4d 2602 *
936fd732 2603 * Adjust SATA spd limit of @link downward. Note that this
1c3fae4d 2604 * function only adjusts the limit. The change must be applied
3c567b7d 2605 * using sata_set_spd().
1c3fae4d
TH
2606 *
2607 * LOCKING:
2608 * Inherited from caller.
2609 *
2610 * RETURNS:
2611 * 0 on success, negative errno on failure
2612 */
936fd732 2613int sata_down_spd_limit(struct ata_link *link)
1c3fae4d 2614{
81952c54
TH
2615 u32 sstatus, spd, mask;
2616 int rc, highbit;
1c3fae4d 2617
936fd732 2618 if (!sata_scr_valid(link))
008a7896
TH
2619 return -EOPNOTSUPP;
2620
2621 /* If SCR can be read, use it to determine the current SPD.
936fd732 2622 * If not, use cached value in link->sata_spd.
008a7896 2623 */
936fd732 2624 rc = sata_scr_read(link, SCR_STATUS, &sstatus);
008a7896
TH
2625 if (rc == 0)
2626 spd = (sstatus >> 4) & 0xf;
2627 else
936fd732 2628 spd = link->sata_spd;
1c3fae4d 2629
936fd732 2630 mask = link->sata_spd_limit;
1c3fae4d
TH
2631 if (mask <= 1)
2632 return -EINVAL;
008a7896
TH
2633
2634 /* unconditionally mask off the highest bit */
1c3fae4d
TH
2635 highbit = fls(mask) - 1;
2636 mask &= ~(1 << highbit);
2637
008a7896
TH
2638 /* Mask off all speeds higher than or equal to the current
2639 * one. Force 1.5Gbps if current SPD is not available.
2640 */
2641 if (spd > 1)
2642 mask &= (1 << (spd - 1)) - 1;
2643 else
2644 mask &= 1;
2645
2646 /* were we already at the bottom? */
1c3fae4d
TH
2647 if (!mask)
2648 return -EINVAL;
2649
936fd732 2650 link->sata_spd_limit = mask;
1c3fae4d 2651
936fd732 2652 ata_link_printk(link, KERN_WARNING, "limiting SATA link speed to %s\n",
f15a1daf 2653 sata_spd_string(fls(mask)));
1c3fae4d
TH
2654
2655 return 0;
2656}
2657
936fd732 2658static int __sata_set_spd_needed(struct ata_link *link, u32 *scontrol)
1c3fae4d 2659{
5270222f
TH
2660 struct ata_link *host_link = &link->ap->link;
2661 u32 limit, target, spd;
1c3fae4d 2662
5270222f
TH
2663 limit = link->sata_spd_limit;
2664
2665 /* Don't configure downstream link faster than upstream link.
2666 * It doesn't speed up anything and some PMPs choke on such
2667 * configuration.
2668 */
2669 if (!ata_is_host_link(link) && host_link->sata_spd)
2670 limit &= (1 << host_link->sata_spd) - 1;
2671
2672 if (limit == UINT_MAX)
2673 target = 0;
1c3fae4d 2674 else
5270222f 2675 target = fls(limit);
1c3fae4d
TH
2676
2677 spd = (*scontrol >> 4) & 0xf;
5270222f 2678 *scontrol = (*scontrol & ~0xf0) | ((target & 0xf) << 4);
1c3fae4d 2679
5270222f 2680 return spd != target;
1c3fae4d
TH
2681}
2682
2683/**
3c567b7d 2684 * sata_set_spd_needed - is SATA spd configuration needed
936fd732 2685 * @link: Link in question
1c3fae4d
TH
2686 *
2687 * Test whether the spd limit in SControl matches
936fd732 2688 * @link->sata_spd_limit. This function is used to determine
1c3fae4d
TH
2689 * whether hardreset is necessary to apply SATA spd
2690 * configuration.
2691 *
2692 * LOCKING:
2693 * Inherited from caller.
2694 *
2695 * RETURNS:
2696 * 1 if SATA spd configuration is needed, 0 otherwise.
2697 */
936fd732 2698int sata_set_spd_needed(struct ata_link *link)
1c3fae4d
TH
2699{
2700 u32 scontrol;
2701
936fd732 2702 if (sata_scr_read(link, SCR_CONTROL, &scontrol))
db64bcf3 2703 return 1;
1c3fae4d 2704
936fd732 2705 return __sata_set_spd_needed(link, &scontrol);
1c3fae4d
TH
2706}
2707
2708/**
3c567b7d 2709 * sata_set_spd - set SATA spd according to spd limit
936fd732 2710 * @link: Link to set SATA spd for
1c3fae4d 2711 *
936fd732 2712 * Set SATA spd of @link according to sata_spd_limit.
1c3fae4d
TH
2713 *
2714 * LOCKING:
2715 * Inherited from caller.
2716 *
2717 * RETURNS:
2718 * 0 if spd doesn't need to be changed, 1 if spd has been
81952c54 2719 * changed. Negative errno if SCR registers are inaccessible.
1c3fae4d 2720 */
936fd732 2721int sata_set_spd(struct ata_link *link)
1c3fae4d
TH
2722{
2723 u32 scontrol;
81952c54 2724 int rc;
1c3fae4d 2725
936fd732 2726 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 2727 return rc;
1c3fae4d 2728
936fd732 2729 if (!__sata_set_spd_needed(link, &scontrol))
1c3fae4d
TH
2730 return 0;
2731
936fd732 2732 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54
TH
2733 return rc;
2734
1c3fae4d
TH
2735 return 1;
2736}
2737
452503f9
AC
2738/*
2739 * This mode timing computation functionality is ported over from
2740 * drivers/ide/ide-timing.h and was originally written by Vojtech Pavlik
2741 */
2742/*
b352e57d 2743 * PIO 0-4, MWDMA 0-2 and UDMA 0-6 timings (in nanoseconds).
452503f9 2744 * These were taken from ATA/ATAPI-6 standard, rev 0a, except
b352e57d
AC
2745 * for UDMA6, which is currently supported only by Maxtor drives.
2746 *
2747 * For PIO 5/6 MWDMA 3/4 see the CFA specification 3.0.
452503f9
AC
2748 */
2749
2750static const struct ata_timing ata_timing[] = {
70cd071e
TH
2751/* { XFER_PIO_SLOW, 120, 290, 240, 960, 290, 240, 960, 0 }, */
2752 { XFER_PIO_0, 70, 290, 240, 600, 165, 150, 600, 0 },
2753 { XFER_PIO_1, 50, 290, 93, 383, 125, 100, 383, 0 },
2754 { XFER_PIO_2, 30, 290, 40, 330, 100, 90, 240, 0 },
2755 { XFER_PIO_3, 30, 80, 70, 180, 80, 70, 180, 0 },
2756 { XFER_PIO_4, 25, 70, 25, 120, 70, 25, 120, 0 },
2757 { XFER_PIO_5, 15, 65, 25, 100, 65, 25, 100, 0 },
2758 { XFER_PIO_6, 10, 55, 20, 80, 55, 20, 80, 0 },
452503f9 2759
70cd071e
TH
2760 { XFER_SW_DMA_0, 120, 0, 0, 0, 480, 480, 960, 0 },
2761 { XFER_SW_DMA_1, 90, 0, 0, 0, 240, 240, 480, 0 },
2762 { XFER_SW_DMA_2, 60, 0, 0, 0, 120, 120, 240, 0 },
452503f9 2763
70cd071e
TH
2764 { XFER_MW_DMA_0, 60, 0, 0, 0, 215, 215, 480, 0 },
2765 { XFER_MW_DMA_1, 45, 0, 0, 0, 80, 50, 150, 0 },
2766 { XFER_MW_DMA_2, 25, 0, 0, 0, 70, 25, 120, 0 },
b352e57d 2767 { XFER_MW_DMA_3, 25, 0, 0, 0, 65, 25, 100, 0 },
70cd071e 2768 { XFER_MW_DMA_4, 25, 0, 0, 0, 55, 20, 80, 0 },
452503f9
AC
2769
2770/* { XFER_UDMA_SLOW, 0, 0, 0, 0, 0, 0, 0, 150 }, */
70cd071e
TH
2771 { XFER_UDMA_0, 0, 0, 0, 0, 0, 0, 0, 120 },
2772 { XFER_UDMA_1, 0, 0, 0, 0, 0, 0, 0, 80 },
2773 { XFER_UDMA_2, 0, 0, 0, 0, 0, 0, 0, 60 },
2774 { XFER_UDMA_3, 0, 0, 0, 0, 0, 0, 0, 45 },
2775 { XFER_UDMA_4, 0, 0, 0, 0, 0, 0, 0, 30 },
2776 { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 20 },
2777 { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 15 },
452503f9
AC
2778
2779 { 0xFF }
2780};
2781
2dcb407e
JG
2782#define ENOUGH(v, unit) (((v)-1)/(unit)+1)
2783#define EZ(v, unit) ((v)?ENOUGH(v, unit):0)
452503f9
AC
2784
2785static void ata_timing_quantize(const struct ata_timing *t, struct ata_timing *q, int T, int UT)
2786{
2787 q->setup = EZ(t->setup * 1000, T);
2788 q->act8b = EZ(t->act8b * 1000, T);
2789 q->rec8b = EZ(t->rec8b * 1000, T);
2790 q->cyc8b = EZ(t->cyc8b * 1000, T);
2791 q->active = EZ(t->active * 1000, T);
2792 q->recover = EZ(t->recover * 1000, T);
2793 q->cycle = EZ(t->cycle * 1000, T);
2794 q->udma = EZ(t->udma * 1000, UT);
2795}
2796
2797void ata_timing_merge(const struct ata_timing *a, const struct ata_timing *b,
2798 struct ata_timing *m, unsigned int what)
2799{
2800 if (what & ATA_TIMING_SETUP ) m->setup = max(a->setup, b->setup);
2801 if (what & ATA_TIMING_ACT8B ) m->act8b = max(a->act8b, b->act8b);
2802 if (what & ATA_TIMING_REC8B ) m->rec8b = max(a->rec8b, b->rec8b);
2803 if (what & ATA_TIMING_CYC8B ) m->cyc8b = max(a->cyc8b, b->cyc8b);
2804 if (what & ATA_TIMING_ACTIVE ) m->active = max(a->active, b->active);
2805 if (what & ATA_TIMING_RECOVER) m->recover = max(a->recover, b->recover);
2806 if (what & ATA_TIMING_CYCLE ) m->cycle = max(a->cycle, b->cycle);
2807 if (what & ATA_TIMING_UDMA ) m->udma = max(a->udma, b->udma);
2808}
2809
6357357c 2810const struct ata_timing *ata_timing_find_mode(u8 xfer_mode)
452503f9 2811{
70cd071e
TH
2812 const struct ata_timing *t = ata_timing;
2813
2814 while (xfer_mode > t->mode)
2815 t++;
452503f9 2816
70cd071e
TH
2817 if (xfer_mode == t->mode)
2818 return t;
2819 return NULL;
452503f9
AC
2820}
2821
2822int ata_timing_compute(struct ata_device *adev, unsigned short speed,
2823 struct ata_timing *t, int T, int UT)
2824{
2825 const struct ata_timing *s;
2826 struct ata_timing p;
2827
2828 /*
2e9edbf8 2829 * Find the mode.
75b1f2f8 2830 */
452503f9
AC
2831
2832 if (!(s = ata_timing_find_mode(speed)))
2833 return -EINVAL;
2834
75b1f2f8
AL
2835 memcpy(t, s, sizeof(*s));
2836
452503f9
AC
2837 /*
2838 * If the drive is an EIDE drive, it can tell us it needs extended
2839 * PIO/MW_DMA cycle timing.
2840 */
2841
2842 if (adev->id[ATA_ID_FIELD_VALID] & 2) { /* EIDE drive */
2843 memset(&p, 0, sizeof(p));
2dcb407e 2844 if (speed >= XFER_PIO_0 && speed <= XFER_SW_DMA_0) {
452503f9
AC
2845 if (speed <= XFER_PIO_2) p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO];
2846 else p.cycle = p.cyc8b = adev->id[ATA_ID_EIDE_PIO_IORDY];
2dcb407e 2847 } else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2) {
452503f9
AC
2848 p.cycle = adev->id[ATA_ID_EIDE_DMA_MIN];
2849 }
2850 ata_timing_merge(&p, t, t, ATA_TIMING_CYCLE | ATA_TIMING_CYC8B);
2851 }
2852
2853 /*
2854 * Convert the timing to bus clock counts.
2855 */
2856
75b1f2f8 2857 ata_timing_quantize(t, t, T, UT);
452503f9
AC
2858
2859 /*
c893a3ae
RD
2860 * Even in DMA/UDMA modes we still use PIO access for IDENTIFY,
2861 * S.M.A.R.T * and some other commands. We have to ensure that the
2862 * DMA cycle timing is slower/equal than the fastest PIO timing.
452503f9
AC
2863 */
2864
fd3367af 2865 if (speed > XFER_PIO_6) {
452503f9
AC
2866 ata_timing_compute(adev, adev->pio_mode, &p, T, UT);
2867 ata_timing_merge(&p, t, t, ATA_TIMING_ALL);
2868 }
2869
2870 /*
c893a3ae 2871 * Lengthen active & recovery time so that cycle time is correct.
452503f9
AC
2872 */
2873
2874 if (t->act8b + t->rec8b < t->cyc8b) {
2875 t->act8b += (t->cyc8b - (t->act8b + t->rec8b)) / 2;
2876 t->rec8b = t->cyc8b - t->act8b;
2877 }
2878
2879 if (t->active + t->recover < t->cycle) {
2880 t->active += (t->cycle - (t->active + t->recover)) / 2;
2881 t->recover = t->cycle - t->active;
2882 }
a617c09f 2883
4f701d1e
AC
2884 /* In a few cases quantisation may produce enough errors to
2885 leave t->cycle too low for the sum of active and recovery
2886 if so we must correct this */
2887 if (t->active + t->recover > t->cycle)
2888 t->cycle = t->active + t->recover;
452503f9
AC
2889
2890 return 0;
2891}
2892
cf176e1a
TH
2893/**
2894 * ata_down_xfermask_limit - adjust dev xfer masks downward
cf176e1a 2895 * @dev: Device to adjust xfer masks
458337db 2896 * @sel: ATA_DNXFER_* selector
cf176e1a
TH
2897 *
2898 * Adjust xfer masks of @dev downward. Note that this function
2899 * does not apply the change. Invoking ata_set_mode() afterwards
2900 * will apply the limit.
2901 *
2902 * LOCKING:
2903 * Inherited from caller.
2904 *
2905 * RETURNS:
2906 * 0 on success, negative errno on failure
2907 */
458337db 2908int ata_down_xfermask_limit(struct ata_device *dev, unsigned int sel)
cf176e1a 2909{
458337db 2910 char buf[32];
7dc951ae
TH
2911 unsigned long orig_mask, xfer_mask;
2912 unsigned long pio_mask, mwdma_mask, udma_mask;
458337db 2913 int quiet, highbit;
cf176e1a 2914
458337db
TH
2915 quiet = !!(sel & ATA_DNXFER_QUIET);
2916 sel &= ~ATA_DNXFER_QUIET;
cf176e1a 2917
458337db
TH
2918 xfer_mask = orig_mask = ata_pack_xfermask(dev->pio_mask,
2919 dev->mwdma_mask,
2920 dev->udma_mask);
2921 ata_unpack_xfermask(xfer_mask, &pio_mask, &mwdma_mask, &udma_mask);
cf176e1a 2922
458337db
TH
2923 switch (sel) {
2924 case ATA_DNXFER_PIO:
2925 highbit = fls(pio_mask) - 1;
2926 pio_mask &= ~(1 << highbit);
2927 break;
2928
2929 case ATA_DNXFER_DMA:
2930 if (udma_mask) {
2931 highbit = fls(udma_mask) - 1;
2932 udma_mask &= ~(1 << highbit);
2933 if (!udma_mask)
2934 return -ENOENT;
2935 } else if (mwdma_mask) {
2936 highbit = fls(mwdma_mask) - 1;
2937 mwdma_mask &= ~(1 << highbit);
2938 if (!mwdma_mask)
2939 return -ENOENT;
2940 }
2941 break;
2942
2943 case ATA_DNXFER_40C:
2944 udma_mask &= ATA_UDMA_MASK_40C;
2945 break;
2946
2947 case ATA_DNXFER_FORCE_PIO0:
2948 pio_mask &= 1;
2949 case ATA_DNXFER_FORCE_PIO:
2950 mwdma_mask = 0;
2951 udma_mask = 0;
2952 break;
2953
458337db
TH
2954 default:
2955 BUG();
2956 }
2957
2958 xfer_mask &= ata_pack_xfermask(pio_mask, mwdma_mask, udma_mask);
2959
2960 if (!(xfer_mask & ATA_MASK_PIO) || xfer_mask == orig_mask)
2961 return -ENOENT;
2962
2963 if (!quiet) {
2964 if (xfer_mask & (ATA_MASK_MWDMA | ATA_MASK_UDMA))
2965 snprintf(buf, sizeof(buf), "%s:%s",
2966 ata_mode_string(xfer_mask),
2967 ata_mode_string(xfer_mask & ATA_MASK_PIO));
2968 else
2969 snprintf(buf, sizeof(buf), "%s",
2970 ata_mode_string(xfer_mask));
2971
2972 ata_dev_printk(dev, KERN_WARNING,
2973 "limiting speed to %s\n", buf);
2974 }
cf176e1a
TH
2975
2976 ata_unpack_xfermask(xfer_mask, &dev->pio_mask, &dev->mwdma_mask,
2977 &dev->udma_mask);
2978
cf176e1a 2979 return 0;
cf176e1a
TH
2980}
2981
3373efd8 2982static int ata_dev_set_mode(struct ata_device *dev)
1da177e4 2983{
9af5c9c9 2984 struct ata_eh_context *ehc = &dev->link->eh_context;
83206a29
TH
2985 unsigned int err_mask;
2986 int rc;
1da177e4 2987
e8384607 2988 dev->flags &= ~ATA_DFLAG_PIO;
1da177e4
LT
2989 if (dev->xfer_shift == ATA_SHIFT_PIO)
2990 dev->flags |= ATA_DFLAG_PIO;
2991
3373efd8 2992 err_mask = ata_dev_set_xfermode(dev);
2dcb407e 2993
11750a40
AC
2994 /* Old CFA may refuse this command, which is just fine */
2995 if (dev->xfer_shift == ATA_SHIFT_PIO && ata_id_is_cfa(dev->id))
2dcb407e
JG
2996 err_mask &= ~AC_ERR_DEV;
2997
0bc2a79a
AC
2998 /* Some very old devices and some bad newer ones fail any kind of
2999 SET_XFERMODE request but support PIO0-2 timings and no IORDY */
3000 if (dev->xfer_shift == ATA_SHIFT_PIO && !ata_id_has_iordy(dev->id) &&
3001 dev->pio_mode <= XFER_PIO_2)
3002 err_mask &= ~AC_ERR_DEV;
2dcb407e 3003
3acaf94b
AC
3004 /* Early MWDMA devices do DMA but don't allow DMA mode setting.
3005 Don't fail an MWDMA0 set IFF the device indicates it is in MWDMA0 */
3006 if (dev->xfer_shift == ATA_SHIFT_MWDMA &&
3007 dev->dma_mode == XFER_MW_DMA_0 &&
3008 (dev->id[63] >> 8) & 1)
3009 err_mask &= ~AC_ERR_DEV;
3010
83206a29 3011 if (err_mask) {
f15a1daf
TH
3012 ata_dev_printk(dev, KERN_ERR, "failed to set xfermode "
3013 "(err_mask=0x%x)\n", err_mask);
83206a29
TH
3014 return -EIO;
3015 }
1da177e4 3016
baa1e78a 3017 ehc->i.flags |= ATA_EHI_POST_SETMODE;
422c9daa 3018 rc = ata_dev_revalidate(dev, ATA_DEV_UNKNOWN, 0);
baa1e78a 3019 ehc->i.flags &= ~ATA_EHI_POST_SETMODE;
5eb45c02 3020 if (rc)
83206a29 3021 return rc;
48a8a14f 3022
23e71c3d
TH
3023 DPRINTK("xfer_shift=%u, xfer_mode=0x%x\n",
3024 dev->xfer_shift, (int)dev->xfer_mode);
1da177e4 3025
f15a1daf
TH
3026 ata_dev_printk(dev, KERN_INFO, "configured for %s\n",
3027 ata_mode_string(ata_xfer_mode2mask(dev->xfer_mode)));
83206a29 3028 return 0;
1da177e4
LT
3029}
3030
1da177e4 3031/**
04351821 3032 * ata_do_set_mode - Program timings and issue SET FEATURES - XFER
0260731f 3033 * @link: link on which timings will be programmed
e82cbdb9 3034 * @r_failed_dev: out paramter for failed device
1da177e4 3035 *
04351821
AC
3036 * Standard implementation of the function used to tune and set
3037 * ATA device disk transfer mode (PIO3, UDMA6, etc.). If
3038 * ata_dev_set_mode() fails, pointer to the failing device is
e82cbdb9 3039 * returned in @r_failed_dev.
780a87f7 3040 *
1da177e4 3041 * LOCKING:
0cba632b 3042 * PCI/etc. bus probe sem.
e82cbdb9
TH
3043 *
3044 * RETURNS:
3045 * 0 on success, negative errno otherwise
1da177e4 3046 */
04351821 3047
0260731f 3048int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev)
1da177e4 3049{
0260731f 3050 struct ata_port *ap = link->ap;
e8e0619f 3051 struct ata_device *dev;
f58229f8 3052 int rc = 0, used_dma = 0, found = 0;
3adcebb2 3053
a6d5a51c 3054 /* step 1: calculate xfer_mask */
f58229f8 3055 ata_link_for_each_dev(dev, link) {
7dc951ae 3056 unsigned long pio_mask, dma_mask;
b3a70601 3057 unsigned int mode_mask;
a6d5a51c 3058
e1211e3f 3059 if (!ata_dev_enabled(dev))
a6d5a51c
TH
3060 continue;
3061
b3a70601
AC
3062 mode_mask = ATA_DMA_MASK_ATA;
3063 if (dev->class == ATA_DEV_ATAPI)
3064 mode_mask = ATA_DMA_MASK_ATAPI;
3065 else if (ata_id_is_cfa(dev->id))
3066 mode_mask = ATA_DMA_MASK_CFA;
3067
3373efd8 3068 ata_dev_xfermask(dev);
1da177e4 3069
acf356b1
TH
3070 pio_mask = ata_pack_xfermask(dev->pio_mask, 0, 0);
3071 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
b3a70601
AC
3072
3073 if (libata_dma_mask & mode_mask)
3074 dma_mask = ata_pack_xfermask(0, dev->mwdma_mask, dev->udma_mask);
3075 else
3076 dma_mask = 0;
3077
acf356b1
TH
3078 dev->pio_mode = ata_xfer_mask2mode(pio_mask);
3079 dev->dma_mode = ata_xfer_mask2mode(dma_mask);
5444a6f4 3080
4f65977d 3081 found = 1;
70cd071e 3082 if (dev->dma_mode != 0xff)
5444a6f4 3083 used_dma = 1;
a6d5a51c 3084 }
4f65977d 3085 if (!found)
e82cbdb9 3086 goto out;
a6d5a51c
TH
3087
3088 /* step 2: always set host PIO timings */
f58229f8 3089 ata_link_for_each_dev(dev, link) {
e8e0619f
TH
3090 if (!ata_dev_enabled(dev))
3091 continue;
3092
70cd071e 3093 if (dev->pio_mode == 0xff) {
f15a1daf 3094 ata_dev_printk(dev, KERN_WARNING, "no PIO support\n");
e8e0619f 3095 rc = -EINVAL;
e82cbdb9 3096 goto out;
e8e0619f
TH
3097 }
3098
3099 dev->xfer_mode = dev->pio_mode;
3100 dev->xfer_shift = ATA_SHIFT_PIO;
3101 if (ap->ops->set_piomode)
3102 ap->ops->set_piomode(ap, dev);
3103 }
1da177e4 3104
a6d5a51c 3105 /* step 3: set host DMA timings */
f58229f8 3106 ata_link_for_each_dev(dev, link) {
70cd071e 3107 if (!ata_dev_enabled(dev) || dev->dma_mode == 0xff)
e8e0619f
TH
3108 continue;
3109
3110 dev->xfer_mode = dev->dma_mode;
3111 dev->xfer_shift = ata_xfer_mode2shift(dev->dma_mode);
3112 if (ap->ops->set_dmamode)
3113 ap->ops->set_dmamode(ap, dev);
3114 }
1da177e4
LT
3115
3116 /* step 4: update devices' xfer mode */
f58229f8 3117 ata_link_for_each_dev(dev, link) {
18d90deb 3118 /* don't update suspended devices' xfer mode */
9666f400 3119 if (!ata_dev_enabled(dev))
83206a29
TH
3120 continue;
3121
3373efd8 3122 rc = ata_dev_set_mode(dev);
5bbc53f4 3123 if (rc)
e82cbdb9 3124 goto out;
83206a29 3125 }
1da177e4 3126
e8e0619f
TH
3127 /* Record simplex status. If we selected DMA then the other
3128 * host channels are not permitted to do so.
5444a6f4 3129 */
cca3974e 3130 if (used_dma && (ap->host->flags & ATA_HOST_SIMPLEX))
032af1ce 3131 ap->host->simplex_claimed = ap;
5444a6f4 3132
e82cbdb9
TH
3133 out:
3134 if (rc)
3135 *r_failed_dev = dev;
3136 return rc;
1da177e4
LT
3137}
3138
1fdffbce
JG
3139/**
3140 * ata_tf_to_host - issue ATA taskfile to host controller
3141 * @ap: port to which command is being issued
3142 * @tf: ATA taskfile register set
3143 *
3144 * Issues ATA taskfile register set to ATA host controller,
3145 * with proper synchronization with interrupt handler and
3146 * other threads.
3147 *
3148 * LOCKING:
cca3974e 3149 * spin_lock_irqsave(host lock)
1fdffbce
JG
3150 */
3151
3152static inline void ata_tf_to_host(struct ata_port *ap,
3153 const struct ata_taskfile *tf)
3154{
3155 ap->ops->tf_load(ap, tf);
3156 ap->ops->exec_command(ap, tf);
3157}
3158
1da177e4
LT
3159/**
3160 * ata_busy_sleep - sleep until BSY clears, or timeout
3161 * @ap: port containing status register to be polled
3162 * @tmout_pat: impatience timeout
3163 * @tmout: overall timeout
3164 *
780a87f7
JG
3165 * Sleep until ATA Status register bit BSY clears,
3166 * or a timeout occurs.
3167 *
d1adc1bb
TH
3168 * LOCKING:
3169 * Kernel thread context (may sleep).
3170 *
3171 * RETURNS:
3172 * 0 on success, -errno otherwise.
1da177e4 3173 */
d1adc1bb
TH
3174int ata_busy_sleep(struct ata_port *ap,
3175 unsigned long tmout_pat, unsigned long tmout)
1da177e4
LT
3176{
3177 unsigned long timer_start, timeout;
3178 u8 status;
3179
3180 status = ata_busy_wait(ap, ATA_BUSY, 300);
3181 timer_start = jiffies;
3182 timeout = timer_start + tmout_pat;
d1adc1bb
TH
3183 while (status != 0xff && (status & ATA_BUSY) &&
3184 time_before(jiffies, timeout)) {
1da177e4
LT
3185 msleep(50);
3186 status = ata_busy_wait(ap, ATA_BUSY, 3);
3187 }
3188
d1adc1bb 3189 if (status != 0xff && (status & ATA_BUSY))
f15a1daf 3190 ata_port_printk(ap, KERN_WARNING,
35aa7a43
JG
3191 "port is slow to respond, please be patient "
3192 "(Status 0x%x)\n", status);
1da177e4
LT
3193
3194 timeout = timer_start + tmout;
d1adc1bb
TH
3195 while (status != 0xff && (status & ATA_BUSY) &&
3196 time_before(jiffies, timeout)) {
1da177e4
LT
3197 msleep(50);
3198 status = ata_chk_status(ap);
3199 }
3200
d1adc1bb
TH
3201 if (status == 0xff)
3202 return -ENODEV;
3203
1da177e4 3204 if (status & ATA_BUSY) {
f15a1daf 3205 ata_port_printk(ap, KERN_ERR, "port failed to respond "
35aa7a43
JG
3206 "(%lu secs, Status 0x%x)\n",
3207 tmout / HZ, status);
d1adc1bb 3208 return -EBUSY;
1da177e4
LT
3209 }
3210
3211 return 0;
3212}
3213
88ff6eaf
TH
3214/**
3215 * ata_wait_after_reset - wait before checking status after reset
3216 * @ap: port containing status register to be polled
3217 * @deadline: deadline jiffies for the operation
3218 *
3219 * After reset, we need to pause a while before reading status.
3220 * Also, certain combination of controller and device report 0xff
3221 * for some duration (e.g. until SATA PHY is up and running)
3222 * which is interpreted as empty port in ATA world. This
3223 * function also waits for such devices to get out of 0xff
3224 * status.
3225 *
3226 * LOCKING:
3227 * Kernel thread context (may sleep).
3228 */
3229void ata_wait_after_reset(struct ata_port *ap, unsigned long deadline)
3230{
3231 unsigned long until = jiffies + ATA_TMOUT_FF_WAIT;
3232
3233 if (time_before(until, deadline))
3234 deadline = until;
3235
3236 /* Spec mandates ">= 2ms" before checking status. We wait
3237 * 150ms, because that was the magic delay used for ATAPI
3238 * devices in Hale Landis's ATADRVR, for the period of time
3239 * between when the ATA command register is written, and then
3240 * status is checked. Because waiting for "a while" before
3241 * checking status is fine, post SRST, we perform this magic
3242 * delay here as well.
3243 *
3244 * Old drivers/ide uses the 2mS rule and then waits for ready.
3245 */
3246 msleep(150);
3247
3248 /* Wait for 0xff to clear. Some SATA devices take a long time
3249 * to clear 0xff after reset. For example, HHD424020F7SV00
3250 * iVDR needs >= 800ms while. Quantum GoVault needs even more
3251 * than that.
1974e201
TH
3252 *
3253 * Note that some PATA controllers (pata_ali) explode if
3254 * status register is read more than once when there's no
3255 * device attached.
88ff6eaf 3256 */
1974e201
TH
3257 if (ap->flags & ATA_FLAG_SATA) {
3258 while (1) {
3259 u8 status = ata_chk_status(ap);
88ff6eaf 3260
1974e201
TH
3261 if (status != 0xff || time_after(jiffies, deadline))
3262 return;
88ff6eaf 3263
1974e201
TH
3264 msleep(50);
3265 }
88ff6eaf
TH
3266 }
3267}
3268
d4b2bab4
TH
3269/**
3270 * ata_wait_ready - sleep until BSY clears, or timeout
3271 * @ap: port containing status register to be polled
3272 * @deadline: deadline jiffies for the operation
3273 *
3274 * Sleep until ATA Status register bit BSY clears, or timeout
3275 * occurs.
3276 *
3277 * LOCKING:
3278 * Kernel thread context (may sleep).
3279 *
3280 * RETURNS:
3281 * 0 on success, -errno otherwise.
3282 */
3283int ata_wait_ready(struct ata_port *ap, unsigned long deadline)
3284{
3285 unsigned long start = jiffies;
3286 int warned = 0;
3287
3288 while (1) {
3289 u8 status = ata_chk_status(ap);
3290 unsigned long now = jiffies;
3291
3292 if (!(status & ATA_BUSY))
3293 return 0;
936fd732 3294 if (!ata_link_online(&ap->link) && status == 0xff)
d4b2bab4
TH
3295 return -ENODEV;
3296 if (time_after(now, deadline))
3297 return -EBUSY;
3298
3299 if (!warned && time_after(now, start + 5 * HZ) &&
3300 (deadline - now > 3 * HZ)) {
3301 ata_port_printk(ap, KERN_WARNING,
3302 "port is slow to respond, please be patient "
3303 "(Status 0x%x)\n", status);
3304 warned = 1;
3305 }
3306
3307 msleep(50);
3308 }
3309}
3310
3311static int ata_bus_post_reset(struct ata_port *ap, unsigned int devmask,
3312 unsigned long deadline)
1da177e4
LT
3313{
3314 struct ata_ioports *ioaddr = &ap->ioaddr;
3315 unsigned int dev0 = devmask & (1 << 0);
3316 unsigned int dev1 = devmask & (1 << 1);
9b89391c 3317 int rc, ret = 0;
1da177e4
LT
3318
3319 /* if device 0 was found in ata_devchk, wait for its
3320 * BSY bit to clear
3321 */
d4b2bab4
TH
3322 if (dev0) {
3323 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3324 if (rc) {
3325 if (rc != -ENODEV)
3326 return rc;
3327 ret = rc;
3328 }
d4b2bab4 3329 }
1da177e4 3330
e141d999
TH
3331 /* if device 1 was found in ata_devchk, wait for register
3332 * access briefly, then wait for BSY to clear.
1da177e4 3333 */
e141d999
TH
3334 if (dev1) {
3335 int i;
1da177e4
LT
3336
3337 ap->ops->dev_select(ap, 1);
e141d999
TH
3338
3339 /* Wait for register access. Some ATAPI devices fail
3340 * to set nsect/lbal after reset, so don't waste too
3341 * much time on it. We're gonna wait for !BSY anyway.
3342 */
3343 for (i = 0; i < 2; i++) {
3344 u8 nsect, lbal;
3345
3346 nsect = ioread8(ioaddr->nsect_addr);
3347 lbal = ioread8(ioaddr->lbal_addr);
3348 if ((nsect == 1) && (lbal == 1))
3349 break;
3350 msleep(50); /* give drive a breather */
3351 }
3352
d4b2bab4 3353 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3354 if (rc) {
3355 if (rc != -ENODEV)
3356 return rc;
3357 ret = rc;
3358 }
d4b2bab4 3359 }
1da177e4
LT
3360
3361 /* is all this really necessary? */
3362 ap->ops->dev_select(ap, 0);
3363 if (dev1)
3364 ap->ops->dev_select(ap, 1);
3365 if (dev0)
3366 ap->ops->dev_select(ap, 0);
d4b2bab4 3367
9b89391c 3368 return ret;
1da177e4
LT
3369}
3370
d4b2bab4
TH
3371static int ata_bus_softreset(struct ata_port *ap, unsigned int devmask,
3372 unsigned long deadline)
1da177e4
LT
3373{
3374 struct ata_ioports *ioaddr = &ap->ioaddr;
3375
44877b4e 3376 DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
1da177e4
LT
3377
3378 /* software reset. causes dev0 to be selected */
0d5ff566
TH
3379 iowrite8(ap->ctl, ioaddr->ctl_addr);
3380 udelay(20); /* FIXME: flush */
3381 iowrite8(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
3382 udelay(20); /* FIXME: flush */
3383 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4 3384
88ff6eaf
TH
3385 /* wait a while before checking status */
3386 ata_wait_after_reset(ap, deadline);
1da177e4 3387
2e9edbf8 3388 /* Before we perform post reset processing we want to see if
298a41ca
TH
3389 * the bus shows 0xFF because the odd clown forgets the D7
3390 * pulldown resistor.
3391 */
150981b0 3392 if (ata_chk_status(ap) == 0xFF)
9b89391c 3393 return -ENODEV;
09c7ad79 3394
d4b2bab4 3395 return ata_bus_post_reset(ap, devmask, deadline);
1da177e4
LT
3396}
3397
3398/**
3399 * ata_bus_reset - reset host port and associated ATA channel
3400 * @ap: port to reset
3401 *
3402 * This is typically the first time we actually start issuing
3403 * commands to the ATA channel. We wait for BSY to clear, then
3404 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
3405 * result. Determine what devices, if any, are on the channel
3406 * by looking at the device 0/1 error register. Look at the signature
3407 * stored in each device's taskfile registers, to determine if
3408 * the device is ATA or ATAPI.
3409 *
3410 * LOCKING:
0cba632b 3411 * PCI/etc. bus probe sem.
cca3974e 3412 * Obtains host lock.
1da177e4
LT
3413 *
3414 * SIDE EFFECTS:
198e0fed 3415 * Sets ATA_FLAG_DISABLED if bus reset fails.
1da177e4
LT
3416 */
3417
3418void ata_bus_reset(struct ata_port *ap)
3419{
9af5c9c9 3420 struct ata_device *device = ap->link.device;
1da177e4
LT
3421 struct ata_ioports *ioaddr = &ap->ioaddr;
3422 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
3423 u8 err;
aec5c3c1 3424 unsigned int dev0, dev1 = 0, devmask = 0;
9b89391c 3425 int rc;
1da177e4 3426
44877b4e 3427 DPRINTK("ENTER, host %u, port %u\n", ap->print_id, ap->port_no);
1da177e4
LT
3428
3429 /* determine if device 0/1 are present */
3430 if (ap->flags & ATA_FLAG_SATA_RESET)
3431 dev0 = 1;
3432 else {
3433 dev0 = ata_devchk(ap, 0);
3434 if (slave_possible)
3435 dev1 = ata_devchk(ap, 1);
3436 }
3437
3438 if (dev0)
3439 devmask |= (1 << 0);
3440 if (dev1)
3441 devmask |= (1 << 1);
3442
3443 /* select device 0 again */
3444 ap->ops->dev_select(ap, 0);
3445
3446 /* issue bus reset */
9b89391c
TH
3447 if (ap->flags & ATA_FLAG_SRST) {
3448 rc = ata_bus_softreset(ap, devmask, jiffies + 40 * HZ);
3449 if (rc && rc != -ENODEV)
aec5c3c1 3450 goto err_out;
9b89391c 3451 }
1da177e4
LT
3452
3453 /*
3454 * determine by signature whether we have ATA or ATAPI devices
3455 */
3f19859e 3456 device[0].class = ata_dev_try_classify(&device[0], dev0, &err);
1da177e4 3457 if ((slave_possible) && (err != 0x81))
3f19859e 3458 device[1].class = ata_dev_try_classify(&device[1], dev1, &err);
1da177e4 3459
1da177e4 3460 /* is double-select really necessary? */
9af5c9c9 3461 if (device[1].class != ATA_DEV_NONE)
1da177e4 3462 ap->ops->dev_select(ap, 1);
9af5c9c9 3463 if (device[0].class != ATA_DEV_NONE)
1da177e4
LT
3464 ap->ops->dev_select(ap, 0);
3465
3466 /* if no devices were detected, disable this port */
9af5c9c9
TH
3467 if ((device[0].class == ATA_DEV_NONE) &&
3468 (device[1].class == ATA_DEV_NONE))
1da177e4
LT
3469 goto err_out;
3470
3471 if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
3472 /* set up device control for ATA_FLAG_SATA_RESET */
0d5ff566 3473 iowrite8(ap->ctl, ioaddr->ctl_addr);
1da177e4
LT
3474 }
3475
3476 DPRINTK("EXIT\n");
3477 return;
3478
3479err_out:
f15a1daf 3480 ata_port_printk(ap, KERN_ERR, "disabling port\n");
ac8869d5 3481 ata_port_disable(ap);
1da177e4
LT
3482
3483 DPRINTK("EXIT\n");
3484}
3485
d7bb4cc7 3486/**
936fd732
TH
3487 * sata_link_debounce - debounce SATA phy status
3488 * @link: ATA link to debounce SATA phy status for
d7bb4cc7 3489 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3490 * @deadline: deadline jiffies for the operation
d7bb4cc7 3491 *
936fd732 3492* Make sure SStatus of @link reaches stable state, determined by
d7bb4cc7
TH
3493 * holding the same value where DET is not 1 for @duration polled
3494 * every @interval, before @timeout. Timeout constraints the
d4b2bab4
TH
3495 * beginning of the stable state. Because DET gets stuck at 1 on
3496 * some controllers after hot unplugging, this functions waits
d7bb4cc7
TH
3497 * until timeout then returns 0 if DET is stable at 1.
3498 *
d4b2bab4
TH
3499 * @timeout is further limited by @deadline. The sooner of the
3500 * two is used.
3501 *
d7bb4cc7
TH
3502 * LOCKING:
3503 * Kernel thread context (may sleep)
3504 *
3505 * RETURNS:
3506 * 0 on success, -errno on failure.
3507 */
936fd732
TH
3508int sata_link_debounce(struct ata_link *link, const unsigned long *params,
3509 unsigned long deadline)
7a7921e8 3510{
d7bb4cc7 3511 unsigned long interval_msec = params[0];
d4b2bab4
TH
3512 unsigned long duration = msecs_to_jiffies(params[1]);
3513 unsigned long last_jiffies, t;
d7bb4cc7
TH
3514 u32 last, cur;
3515 int rc;
3516
d4b2bab4
TH
3517 t = jiffies + msecs_to_jiffies(params[2]);
3518 if (time_before(t, deadline))
3519 deadline = t;
3520
936fd732 3521 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3522 return rc;
3523 cur &= 0xf;
3524
3525 last = cur;
3526 last_jiffies = jiffies;
3527
3528 while (1) {
3529 msleep(interval_msec);
936fd732 3530 if ((rc = sata_scr_read(link, SCR_STATUS, &cur)))
d7bb4cc7
TH
3531 return rc;
3532 cur &= 0xf;
3533
3534 /* DET stable? */
3535 if (cur == last) {
d4b2bab4 3536 if (cur == 1 && time_before(jiffies, deadline))
d7bb4cc7
TH
3537 continue;
3538 if (time_after(jiffies, last_jiffies + duration))
3539 return 0;
3540 continue;
3541 }
3542
3543 /* unstable, start over */
3544 last = cur;
3545 last_jiffies = jiffies;
3546
f1545154
TH
3547 /* Check deadline. If debouncing failed, return
3548 * -EPIPE to tell upper layer to lower link speed.
3549 */
d4b2bab4 3550 if (time_after(jiffies, deadline))
f1545154 3551 return -EPIPE;
d7bb4cc7
TH
3552 }
3553}
3554
3555/**
936fd732
TH
3556 * sata_link_resume - resume SATA link
3557 * @link: ATA link to resume SATA
d7bb4cc7 3558 * @params: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3559 * @deadline: deadline jiffies for the operation
d7bb4cc7 3560 *
936fd732 3561 * Resume SATA phy @link and debounce it.
d7bb4cc7
TH
3562 *
3563 * LOCKING:
3564 * Kernel thread context (may sleep)
3565 *
3566 * RETURNS:
3567 * 0 on success, -errno on failure.
3568 */
936fd732
TH
3569int sata_link_resume(struct ata_link *link, const unsigned long *params,
3570 unsigned long deadline)
d7bb4cc7
TH
3571{
3572 u32 scontrol;
81952c54
TH
3573 int rc;
3574
936fd732 3575 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
81952c54 3576 return rc;
7a7921e8 3577
852ee16a 3578 scontrol = (scontrol & 0x0f0) | 0x300;
81952c54 3579
936fd732 3580 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
81952c54 3581 return rc;
7a7921e8 3582
d7bb4cc7
TH
3583 /* Some PHYs react badly if SStatus is pounded immediately
3584 * after resuming. Delay 200ms before debouncing.
3585 */
3586 msleep(200);
7a7921e8 3587
936fd732 3588 return sata_link_debounce(link, params, deadline);
7a7921e8
TH
3589}
3590
f5914a46
TH
3591/**
3592 * ata_std_prereset - prepare for reset
cc0680a5 3593 * @link: ATA link to be reset
d4b2bab4 3594 * @deadline: deadline jiffies for the operation
f5914a46 3595 *
cc0680a5 3596 * @link is about to be reset. Initialize it. Failure from
b8cffc6a
TH
3597 * prereset makes libata abort whole reset sequence and give up
3598 * that port, so prereset should be best-effort. It does its
3599 * best to prepare for reset sequence but if things go wrong, it
3600 * should just whine, not fail.
f5914a46
TH
3601 *
3602 * LOCKING:
3603 * Kernel thread context (may sleep)
3604 *
3605 * RETURNS:
3606 * 0 on success, -errno otherwise.
3607 */
cc0680a5 3608int ata_std_prereset(struct ata_link *link, unsigned long deadline)
f5914a46 3609{
cc0680a5 3610 struct ata_port *ap = link->ap;
936fd732 3611 struct ata_eh_context *ehc = &link->eh_context;
e9c83914 3612 const unsigned long *timing = sata_ehc_deb_timing(ehc);
f5914a46
TH
3613 int rc;
3614
31daabda 3615 /* handle link resume */
28324304 3616 if ((ehc->i.flags & ATA_EHI_RESUME_LINK) &&
0c88758b 3617 (link->flags & ATA_LFLAG_HRST_TO_RESUME))
28324304
TH
3618 ehc->i.action |= ATA_EH_HARDRESET;
3619
633273a3
TH
3620 /* Some PMPs don't work with only SRST, force hardreset if PMP
3621 * is supported.
3622 */
3623 if (ap->flags & ATA_FLAG_PMP)
3624 ehc->i.action |= ATA_EH_HARDRESET;
3625
f5914a46
TH
3626 /* if we're about to do hardreset, nothing more to do */
3627 if (ehc->i.action & ATA_EH_HARDRESET)
3628 return 0;
3629
936fd732 3630 /* if SATA, resume link */
a16abc0b 3631 if (ap->flags & ATA_FLAG_SATA) {
936fd732 3632 rc = sata_link_resume(link, timing, deadline);
b8cffc6a
TH
3633 /* whine about phy resume failure but proceed */
3634 if (rc && rc != -EOPNOTSUPP)
cc0680a5 3635 ata_link_printk(link, KERN_WARNING, "failed to resume "
f5914a46 3636 "link for reset (errno=%d)\n", rc);
f5914a46
TH
3637 }
3638
3639 /* Wait for !BSY if the controller can wait for the first D2H
3640 * Reg FIS and we don't know that no device is attached.
3641 */
0c88758b 3642 if (!(link->flags & ATA_LFLAG_SKIP_D2H_BSY) && !ata_link_offline(link)) {
b8cffc6a 3643 rc = ata_wait_ready(ap, deadline);
6dffaf61 3644 if (rc && rc != -ENODEV) {
cc0680a5 3645 ata_link_printk(link, KERN_WARNING, "device not ready "
b8cffc6a
TH
3646 "(errno=%d), forcing hardreset\n", rc);
3647 ehc->i.action |= ATA_EH_HARDRESET;
3648 }
3649 }
f5914a46
TH
3650
3651 return 0;
3652}
3653
c2bd5804
TH
3654/**
3655 * ata_std_softreset - reset host port via ATA SRST
cc0680a5 3656 * @link: ATA link to reset
c2bd5804 3657 * @classes: resulting classes of attached devices
d4b2bab4 3658 * @deadline: deadline jiffies for the operation
c2bd5804 3659 *
52783c5d 3660 * Reset host port using ATA SRST.
c2bd5804
TH
3661 *
3662 * LOCKING:
3663 * Kernel thread context (may sleep)
3664 *
3665 * RETURNS:
3666 * 0 on success, -errno otherwise.
3667 */
cc0680a5 3668int ata_std_softreset(struct ata_link *link, unsigned int *classes,
d4b2bab4 3669 unsigned long deadline)
c2bd5804 3670{
cc0680a5 3671 struct ata_port *ap = link->ap;
c2bd5804 3672 unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
d4b2bab4
TH
3673 unsigned int devmask = 0;
3674 int rc;
c2bd5804
TH
3675 u8 err;
3676
3677 DPRINTK("ENTER\n");
3678
936fd732 3679 if (ata_link_offline(link)) {
3a39746a
TH
3680 classes[0] = ATA_DEV_NONE;
3681 goto out;
3682 }
3683
c2bd5804
TH
3684 /* determine if device 0/1 are present */
3685 if (ata_devchk(ap, 0))
3686 devmask |= (1 << 0);
3687 if (slave_possible && ata_devchk(ap, 1))
3688 devmask |= (1 << 1);
3689
c2bd5804
TH
3690 /* select device 0 again */
3691 ap->ops->dev_select(ap, 0);
3692
3693 /* issue bus reset */
3694 DPRINTK("about to softreset, devmask=%x\n", devmask);
d4b2bab4 3695 rc = ata_bus_softreset(ap, devmask, deadline);
9b89391c 3696 /* if link is occupied, -ENODEV too is an error */
936fd732 3697 if (rc && (rc != -ENODEV || sata_scr_valid(link))) {
cc0680a5 3698 ata_link_printk(link, KERN_ERR, "SRST failed (errno=%d)\n", rc);
d4b2bab4 3699 return rc;
c2bd5804
TH
3700 }
3701
3702 /* determine by signature whether we have ATA or ATAPI devices */
3f19859e
TH
3703 classes[0] = ata_dev_try_classify(&link->device[0],
3704 devmask & (1 << 0), &err);
c2bd5804 3705 if (slave_possible && err != 0x81)
3f19859e
TH
3706 classes[1] = ata_dev_try_classify(&link->device[1],
3707 devmask & (1 << 1), &err);
c2bd5804 3708
3a39746a 3709 out:
c2bd5804
TH
3710 DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
3711 return 0;
3712}
3713
3714/**
cc0680a5
TH
3715 * sata_link_hardreset - reset link via SATA phy reset
3716 * @link: link to reset
b6103f6d 3717 * @timing: timing parameters { interval, duratinon, timeout } in msec
d4b2bab4 3718 * @deadline: deadline jiffies for the operation
c2bd5804 3719 *
cc0680a5 3720 * SATA phy-reset @link using DET bits of SControl register.
c2bd5804
TH
3721 *
3722 * LOCKING:
3723 * Kernel thread context (may sleep)
3724 *
3725 * RETURNS:
3726 * 0 on success, -errno otherwise.
3727 */
cc0680a5 3728int sata_link_hardreset(struct ata_link *link, const unsigned long *timing,
d4b2bab4 3729 unsigned long deadline)
c2bd5804 3730{
852ee16a 3731 u32 scontrol;
81952c54 3732 int rc;
852ee16a 3733
c2bd5804
TH
3734 DPRINTK("ENTER\n");
3735
936fd732 3736 if (sata_set_spd_needed(link)) {
1c3fae4d
TH
3737 /* SATA spec says nothing about how to reconfigure
3738 * spd. To be on the safe side, turn off phy during
3739 * reconfiguration. This works for at least ICH7 AHCI
3740 * and Sil3124.
3741 */
936fd732 3742 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3743 goto out;
81952c54 3744
a34b6fc0 3745 scontrol = (scontrol & 0x0f0) | 0x304;
81952c54 3746
936fd732 3747 if ((rc = sata_scr_write(link, SCR_CONTROL, scontrol)))
b6103f6d 3748 goto out;
1c3fae4d 3749
936fd732 3750 sata_set_spd(link);
1c3fae4d
TH
3751 }
3752
3753 /* issue phy wake/reset */
936fd732 3754 if ((rc = sata_scr_read(link, SCR_CONTROL, &scontrol)))
b6103f6d 3755 goto out;
81952c54 3756
852ee16a 3757 scontrol = (scontrol & 0x0f0) | 0x301;
81952c54 3758
936fd732 3759 if ((rc = sata_scr_write_flush(link, SCR_CONTROL, scontrol)))
b6103f6d 3760 goto out;
c2bd5804 3761
1c3fae4d 3762 /* Couldn't find anything in SATA I/II specs, but AHCI-1.1
c2bd5804
TH
3763 * 10.4.2 says at least 1 ms.
3764 */
3765 msleep(1);
3766
936fd732
TH
3767 /* bring link back */
3768 rc = sata_link_resume(link, timing, deadline);
b6103f6d
TH
3769 out:
3770 DPRINTK("EXIT, rc=%d\n", rc);
3771 return rc;
3772}
3773
3774/**
3775 * sata_std_hardreset - reset host port via SATA phy reset
cc0680a5 3776 * @link: link to reset
b6103f6d 3777 * @class: resulting class of attached device
d4b2bab4 3778 * @deadline: deadline jiffies for the operation
b6103f6d
TH
3779 *
3780 * SATA phy-reset host port using DET bits of SControl register,
3781 * wait for !BSY and classify the attached device.
3782 *
3783 * LOCKING:
3784 * Kernel thread context (may sleep)
3785 *
3786 * RETURNS:
3787 * 0 on success, -errno otherwise.
3788 */
cc0680a5 3789int sata_std_hardreset(struct ata_link *link, unsigned int *class,
d4b2bab4 3790 unsigned long deadline)
b6103f6d 3791{
cc0680a5 3792 struct ata_port *ap = link->ap;
936fd732 3793 const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
b6103f6d
TH
3794 int rc;
3795
3796 DPRINTK("ENTER\n");
3797
3798 /* do hardreset */
cc0680a5 3799 rc = sata_link_hardreset(link, timing, deadline);
b6103f6d 3800 if (rc) {
cc0680a5 3801 ata_link_printk(link, KERN_ERR,
b6103f6d
TH
3802 "COMRESET failed (errno=%d)\n", rc);
3803 return rc;
3804 }
c2bd5804 3805
c2bd5804 3806 /* TODO: phy layer with polling, timeouts, etc. */
936fd732 3807 if (ata_link_offline(link)) {
c2bd5804
TH
3808 *class = ATA_DEV_NONE;
3809 DPRINTK("EXIT, link offline\n");
3810 return 0;
3811 }
3812
88ff6eaf
TH
3813 /* wait a while before checking status */
3814 ata_wait_after_reset(ap, deadline);
34fee227 3815
633273a3
TH
3816 /* If PMP is supported, we have to do follow-up SRST. Note
3817 * that some PMPs don't send D2H Reg FIS after hardreset at
3818 * all if the first port is empty. Wait for it just for a
3819 * second and request follow-up SRST.
3820 */
3821 if (ap->flags & ATA_FLAG_PMP) {
3822 ata_wait_ready(ap, jiffies + HZ);
3823 return -EAGAIN;
3824 }
3825
d4b2bab4 3826 rc = ata_wait_ready(ap, deadline);
9b89391c
TH
3827 /* link occupied, -ENODEV too is an error */
3828 if (rc) {
cc0680a5 3829 ata_link_printk(link, KERN_ERR,
d4b2bab4
TH
3830 "COMRESET failed (errno=%d)\n", rc);
3831 return rc;
c2bd5804
TH
3832 }
3833
3a39746a
TH
3834 ap->ops->dev_select(ap, 0); /* probably unnecessary */
3835
3f19859e 3836 *class = ata_dev_try_classify(link->device, 1, NULL);
c2bd5804
TH
3837
3838 DPRINTK("EXIT, class=%u\n", *class);
3839 return 0;
3840}
3841
3842/**
3843 * ata_std_postreset - standard postreset callback
cc0680a5 3844 * @link: the target ata_link
c2bd5804
TH
3845 * @classes: classes of attached devices
3846 *
3847 * This function is invoked after a successful reset. Note that
3848 * the device might have been reset more than once using
3849 * different reset methods before postreset is invoked.
c2bd5804 3850 *
c2bd5804
TH
3851 * LOCKING:
3852 * Kernel thread context (may sleep)
3853 */
cc0680a5 3854void ata_std_postreset(struct ata_link *link, unsigned int *classes)
c2bd5804 3855{
cc0680a5 3856 struct ata_port *ap = link->ap;
dc2b3515
TH
3857 u32 serror;
3858
c2bd5804
TH
3859 DPRINTK("ENTER\n");
3860
c2bd5804 3861 /* print link status */
936fd732 3862 sata_print_link_status(link);
c2bd5804 3863
dc2b3515 3864 /* clear SError */
936fd732
TH
3865 if (sata_scr_read(link, SCR_ERROR, &serror) == 0)
3866 sata_scr_write(link, SCR_ERROR, serror);
f7fe7ad4 3867 link->eh_info.serror = 0;
dc2b3515 3868
c2bd5804
TH
3869 /* is double-select really necessary? */
3870 if (classes[0] != ATA_DEV_NONE)
3871 ap->ops->dev_select(ap, 1);
3872 if (classes[1] != ATA_DEV_NONE)
3873 ap->ops->dev_select(ap, 0);
3874
3a39746a
TH
3875 /* bail out if no device is present */
3876 if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
3877 DPRINTK("EXIT, no device\n");
3878 return;
3879 }
3880
3881 /* set up device control */
0d5ff566
TH
3882 if (ap->ioaddr.ctl_addr)
3883 iowrite8(ap->ctl, ap->ioaddr.ctl_addr);
c2bd5804
TH
3884
3885 DPRINTK("EXIT\n");
3886}
3887
623a3128
TH
3888/**
3889 * ata_dev_same_device - Determine whether new ID matches configured device
623a3128
TH
3890 * @dev: device to compare against
3891 * @new_class: class of the new device
3892 * @new_id: IDENTIFY page of the new device
3893 *
3894 * Compare @new_class and @new_id against @dev and determine
3895 * whether @dev is the device indicated by @new_class and
3896 * @new_id.
3897 *
3898 * LOCKING:
3899 * None.
3900 *
3901 * RETURNS:
3902 * 1 if @dev matches @new_class and @new_id, 0 otherwise.
3903 */
3373efd8
TH
3904static int ata_dev_same_device(struct ata_device *dev, unsigned int new_class,
3905 const u16 *new_id)
623a3128
TH
3906{
3907 const u16 *old_id = dev->id;
a0cf733b
TH
3908 unsigned char model[2][ATA_ID_PROD_LEN + 1];
3909 unsigned char serial[2][ATA_ID_SERNO_LEN + 1];
623a3128
TH
3910
3911 if (dev->class != new_class) {
f15a1daf
TH
3912 ata_dev_printk(dev, KERN_INFO, "class mismatch %d != %d\n",
3913 dev->class, new_class);
623a3128
TH
3914 return 0;
3915 }
3916
a0cf733b
TH
3917 ata_id_c_string(old_id, model[0], ATA_ID_PROD, sizeof(model[0]));
3918 ata_id_c_string(new_id, model[1], ATA_ID_PROD, sizeof(model[1]));
3919 ata_id_c_string(old_id, serial[0], ATA_ID_SERNO, sizeof(serial[0]));
3920 ata_id_c_string(new_id, serial[1], ATA_ID_SERNO, sizeof(serial[1]));
623a3128
TH
3921
3922 if (strcmp(model[0], model[1])) {
f15a1daf
TH
3923 ata_dev_printk(dev, KERN_INFO, "model number mismatch "
3924 "'%s' != '%s'\n", model[0], model[1]);
623a3128
TH
3925 return 0;
3926 }
3927
3928 if (strcmp(serial[0], serial[1])) {
f15a1daf
TH
3929 ata_dev_printk(dev, KERN_INFO, "serial number mismatch "
3930 "'%s' != '%s'\n", serial[0], serial[1]);
623a3128
TH
3931 return 0;
3932 }
3933
623a3128
TH
3934 return 1;
3935}
3936
3937/**
fe30911b 3938 * ata_dev_reread_id - Re-read IDENTIFY data
3fae450c 3939 * @dev: target ATA device
bff04647 3940 * @readid_flags: read ID flags
623a3128
TH
3941 *
3942 * Re-read IDENTIFY page and make sure @dev is still attached to
3943 * the port.
3944 *
3945 * LOCKING:
3946 * Kernel thread context (may sleep)
3947 *
3948 * RETURNS:
3949 * 0 on success, negative errno otherwise
3950 */
fe30911b 3951int ata_dev_reread_id(struct ata_device *dev, unsigned int readid_flags)
623a3128 3952{
5eb45c02 3953 unsigned int class = dev->class;
9af5c9c9 3954 u16 *id = (void *)dev->link->ap->sector_buf;
623a3128
TH
3955 int rc;
3956
fe635c7e 3957 /* read ID data */
bff04647 3958 rc = ata_dev_read_id(dev, &class, readid_flags, id);
623a3128 3959 if (rc)
fe30911b 3960 return rc;
623a3128
TH
3961
3962 /* is the device still there? */
fe30911b
TH
3963 if (!ata_dev_same_device(dev, class, id))
3964 return -ENODEV;
623a3128 3965
fe635c7e 3966 memcpy(dev->id, id, sizeof(id[0]) * ATA_ID_WORDS);
fe30911b
TH
3967 return 0;
3968}
3969
3970/**
3971 * ata_dev_revalidate - Revalidate ATA device
3972 * @dev: device to revalidate
422c9daa 3973 * @new_class: new class code
fe30911b
TH
3974 * @readid_flags: read ID flags
3975 *
3976 * Re-read IDENTIFY page, make sure @dev is still attached to the
3977 * port and reconfigure it according to the new IDENTIFY page.
3978 *
3979 * LOCKING:
3980 * Kernel thread context (may sleep)
3981 *
3982 * RETURNS:
3983 * 0 on success, negative errno otherwise
3984 */
422c9daa
TH
3985int ata_dev_revalidate(struct ata_device *dev, unsigned int new_class,
3986 unsigned int readid_flags)
fe30911b 3987{
6ddcd3b0 3988 u64 n_sectors = dev->n_sectors;
fe30911b
TH
3989 int rc;
3990
3991 if (!ata_dev_enabled(dev))
3992 return -ENODEV;
3993
422c9daa
TH
3994 /* fail early if !ATA && !ATAPI to avoid issuing [P]IDENTIFY to PMP */
3995 if (ata_class_enabled(new_class) &&
3996 new_class != ATA_DEV_ATA && new_class != ATA_DEV_ATAPI) {
3997 ata_dev_printk(dev, KERN_INFO, "class mismatch %u != %u\n",
3998 dev->class, new_class);
3999 rc = -ENODEV;
4000 goto fail;
4001 }
4002
fe30911b
TH
4003 /* re-read ID */
4004 rc = ata_dev_reread_id(dev, readid_flags);
4005 if (rc)
4006 goto fail;
623a3128
TH
4007
4008 /* configure device according to the new ID */
efdaedc4 4009 rc = ata_dev_configure(dev);
6ddcd3b0
TH
4010 if (rc)
4011 goto fail;
4012
4013 /* verify n_sectors hasn't changed */
b54eebd6
TH
4014 if (dev->class == ATA_DEV_ATA && n_sectors &&
4015 dev->n_sectors != n_sectors) {
6ddcd3b0
TH
4016 ata_dev_printk(dev, KERN_INFO, "n_sectors mismatch "
4017 "%llu != %llu\n",
4018 (unsigned long long)n_sectors,
4019 (unsigned long long)dev->n_sectors);
8270bec4
TH
4020
4021 /* restore original n_sectors */
4022 dev->n_sectors = n_sectors;
4023
6ddcd3b0
TH
4024 rc = -ENODEV;
4025 goto fail;
4026 }
4027
4028 return 0;
623a3128
TH
4029
4030 fail:
f15a1daf 4031 ata_dev_printk(dev, KERN_ERR, "revalidation failed (errno=%d)\n", rc);
623a3128
TH
4032 return rc;
4033}
4034
6919a0a6
AC
4035struct ata_blacklist_entry {
4036 const char *model_num;
4037 const char *model_rev;
4038 unsigned long horkage;
4039};
4040
4041static const struct ata_blacklist_entry ata_device_blacklist [] = {
4042 /* Devices with DMA related problems under Linux */
4043 { "WDC AC11000H", NULL, ATA_HORKAGE_NODMA },
4044 { "WDC AC22100H", NULL, ATA_HORKAGE_NODMA },
4045 { "WDC AC32500H", NULL, ATA_HORKAGE_NODMA },
4046 { "WDC AC33100H", NULL, ATA_HORKAGE_NODMA },
4047 { "WDC AC31600H", NULL, ATA_HORKAGE_NODMA },
4048 { "WDC AC32100H", "24.09P07", ATA_HORKAGE_NODMA },
4049 { "WDC AC23200L", "21.10N21", ATA_HORKAGE_NODMA },
4050 { "Compaq CRD-8241B", NULL, ATA_HORKAGE_NODMA },
4051 { "CRD-8400B", NULL, ATA_HORKAGE_NODMA },
4052 { "CRD-8480B", NULL, ATA_HORKAGE_NODMA },
4053 { "CRD-8482B", NULL, ATA_HORKAGE_NODMA },
4054 { "CRD-84", NULL, ATA_HORKAGE_NODMA },
4055 { "SanDisk SDP3B", NULL, ATA_HORKAGE_NODMA },
4056 { "SanDisk SDP3B-64", NULL, ATA_HORKAGE_NODMA },
4057 { "SANYO CD-ROM CRD", NULL, ATA_HORKAGE_NODMA },
4058 { "HITACHI CDR-8", NULL, ATA_HORKAGE_NODMA },
4059 { "HITACHI CDR-8335", NULL, ATA_HORKAGE_NODMA },
4060 { "HITACHI CDR-8435", NULL, ATA_HORKAGE_NODMA },
4061 { "Toshiba CD-ROM XM-6202B", NULL, ATA_HORKAGE_NODMA },
4062 { "TOSHIBA CD-ROM XM-1702BC", NULL, ATA_HORKAGE_NODMA },
4063 { "CD-532E-A", NULL, ATA_HORKAGE_NODMA },
4064 { "E-IDE CD-ROM CR-840",NULL, ATA_HORKAGE_NODMA },
4065 { "CD-ROM Drive/F5A", NULL, ATA_HORKAGE_NODMA },
4066 { "WPI CDD-820", NULL, ATA_HORKAGE_NODMA },
4067 { "SAMSUNG CD-ROM SC-148C", NULL, ATA_HORKAGE_NODMA },
4068 { "SAMSUNG CD-ROM SC", NULL, ATA_HORKAGE_NODMA },
6919a0a6
AC
4069 { "ATAPI CD-ROM DRIVE 40X MAXIMUM",NULL,ATA_HORKAGE_NODMA },
4070 { "_NEC DV5800A", NULL, ATA_HORKAGE_NODMA },
2dcb407e 4071 { "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
39f19886 4072 { "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
3af9a77a
TH
4073 /* Odd clown on sil3726/4726 PMPs */
4074 { "Config Disk", NULL, ATA_HORKAGE_NODMA |
4075 ATA_HORKAGE_SKIP_PM },
6919a0a6 4076
18d6e9d5 4077 /* Weird ATAPI devices */
40a1d531 4078 { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 },
18d6e9d5 4079
6919a0a6
AC
4080 /* Devices we expect to fail diagnostics */
4081
4082 /* Devices where NCQ should be avoided */
4083 /* NCQ is slow */
2dcb407e 4084 { "WDC WD740ADFD-00", NULL, ATA_HORKAGE_NONCQ },
459ad688 4085 { "WDC WD740ADFD-00NLR1", NULL, ATA_HORKAGE_NONCQ, },
09125ea6
TH
4086 /* http://thread.gmane.org/gmane.linux.ide/14907 */
4087 { "FUJITSU MHT2060BH", NULL, ATA_HORKAGE_NONCQ },
7acfaf30 4088 /* NCQ is broken */
539cc7c7 4089 { "Maxtor *", "BANC*", ATA_HORKAGE_NONCQ },
0e3dbc01 4090 { "Maxtor 7V300F0", "VA111630", ATA_HORKAGE_NONCQ },
0b0a43e0
DM
4091 { "HITACHI HDS7250SASUN500G*", NULL, ATA_HORKAGE_NONCQ },
4092 { "HITACHI HDS7225SBSUN250G*", NULL, ATA_HORKAGE_NONCQ },
da6f0ec2 4093 { "ST380817AS", "3.42", ATA_HORKAGE_NONCQ },
e41bd3e8 4094 { "ST3160023AS", "3.42", ATA_HORKAGE_NONCQ },
539cc7c7 4095
36e337d0
RH
4096 /* Blacklist entries taken from Silicon Image 3124/3132
4097 Windows driver .inf file - also several Linux problem reports */
4098 { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, },
4099 { "HTS541080G9SA00", "MB4OC60D", ATA_HORKAGE_NONCQ, },
4100 { "HTS541010G9SA00", "MBZOC60D", ATA_HORKAGE_NONCQ, },
6919a0a6 4101
16c55b03
TH
4102 /* devices which puke on READ_NATIVE_MAX */
4103 { "HDS724040KLSA80", "KFAOA20N", ATA_HORKAGE_BROKEN_HPA, },
4104 { "WDC WD3200JD-00KLB0", "WD-WCAMR1130137", ATA_HORKAGE_BROKEN_HPA },
4105 { "WDC WD2500JD-00HBB0", "WD-WMAL71490727", ATA_HORKAGE_BROKEN_HPA },
4106 { "MAXTOR 6L080L4", "A93.0500", ATA_HORKAGE_BROKEN_HPA },
6919a0a6 4107
93328e11
AC
4108 /* Devices which report 1 sector over size HPA */
4109 { "ST340823A", NULL, ATA_HORKAGE_HPA_SIZE, },
4110 { "ST320413A", NULL, ATA_HORKAGE_HPA_SIZE, },
4111
6bbfd53d
AC
4112 /* Devices which get the IVB wrong */
4113 { "QUANTUM FIREBALLlct10 05", "A03.0900", ATA_HORKAGE_IVB, },
4114 { "TSSTcorp CDDVDW SH-S202J", "SB00", ATA_HORKAGE_IVB, },
e9f33406
PM
4115 { "TSSTcorp CDDVDW SH-S202J", "SB01", ATA_HORKAGE_IVB, },
4116 { "TSSTcorp CDDVDW SH-S202N", "SB00", ATA_HORKAGE_IVB, },
4117 { "TSSTcorp CDDVDW SH-S202N", "SB01", ATA_HORKAGE_IVB, },
6bbfd53d 4118
6919a0a6
AC
4119 /* End Marker */
4120 { }
1da177e4 4121};
2e9edbf8 4122
/* Compare @name against pattern @patt.  A trailing @wildchar in @patt
 * ("Maxtor *") limits the comparison to the prefix before the wildcard;
 * otherwise the first strlen(name) characters are compared.  Returns 0
 * on match, non-zero otherwise (strncmp semantics).
 */
static int strn_pattern_cmp(const char *patt, const char *name, int wildchar)
{
	const char *wc;
	int len;

	/* trailing "<wildchar>\0" means: prefix match only */
	wc = strchr(patt, wildchar);
	if (wc && wc[1] == '\0') {
		len = wc - patt;
	} else {
		len = strlen(name);
		if (!len)
			return *patt ? -1 : 0;
	}

	return strncmp(patt, name, len);
}
4145
75683fe7 4146static unsigned long ata_dev_blacklisted(const struct ata_device *dev)
1da177e4 4147{
8bfa79fc
TH
4148 unsigned char model_num[ATA_ID_PROD_LEN + 1];
4149 unsigned char model_rev[ATA_ID_FW_REV_LEN + 1];
6919a0a6 4150 const struct ata_blacklist_entry *ad = ata_device_blacklist;
3a778275 4151
8bfa79fc
TH
4152 ata_id_c_string(dev->id, model_num, ATA_ID_PROD, sizeof(model_num));
4153 ata_id_c_string(dev->id, model_rev, ATA_ID_FW_REV, sizeof(model_rev));
1da177e4 4154
6919a0a6 4155 while (ad->model_num) {
539cc7c7 4156 if (!strn_pattern_cmp(ad->model_num, model_num, '*')) {
6919a0a6
AC
4157 if (ad->model_rev == NULL)
4158 return ad->horkage;
539cc7c7 4159 if (!strn_pattern_cmp(ad->model_rev, model_rev, '*'))
6919a0a6 4160 return ad->horkage;
f4b15fef 4161 }
6919a0a6 4162 ad++;
f4b15fef 4163 }
1da177e4
LT
4164 return 0;
4165}
4166
6919a0a6
AC
4167static int ata_dma_blacklisted(const struct ata_device *dev)
4168{
4169 /* We don't support polling DMA.
4170 * DMA blacklist those ATAPI devices with CDB-intr (and use PIO)
4171 * if the LLDD handles only interrupts in the HSM_ST_LAST state.
4172 */
9af5c9c9 4173 if ((dev->link->ap->flags & ATA_FLAG_PIO_POLLING) &&
6919a0a6
AC
4174 (dev->flags & ATA_DFLAG_CDB_INTR))
4175 return 1;
75683fe7 4176 return (dev->horkage & ATA_HORKAGE_NODMA) ? 1 : 0;
6919a0a6
AC
4177}
4178
6bbfd53d
AC
4179/**
4180 * ata_is_40wire - check drive side detection
4181 * @dev: device
4182 *
4183 * Perform drive side detection decoding, allowing for device vendors
4184 * who can't follow the documentation.
4185 */
4186
4187static int ata_is_40wire(struct ata_device *dev)
4188{
4189 if (dev->horkage & ATA_HORKAGE_IVB)
4190 return ata_drive_40wire_relaxed(dev->id);
4191 return ata_drive_40wire(dev->id);
4192}
4193
a6d5a51c
TH
4194/**
4195 * ata_dev_xfermask - Compute supported xfermask of the given device
a6d5a51c
TH
4196 * @dev: Device to compute xfermask for
4197 *
acf356b1
TH
4198 * Compute supported xfermask of @dev and store it in
4199 * dev->*_mask. This function is responsible for applying all
4200 * known limits including host controller limits, device
4201 * blacklist, etc...
a6d5a51c
TH
4202 *
4203 * LOCKING:
4204 * None.
a6d5a51c 4205 */
3373efd8 4206static void ata_dev_xfermask(struct ata_device *dev)
1da177e4 4207{
9af5c9c9
TH
4208 struct ata_link *link = dev->link;
4209 struct ata_port *ap = link->ap;
cca3974e 4210 struct ata_host *host = ap->host;
a6d5a51c 4211 unsigned long xfer_mask;
1da177e4 4212
37deecb5 4213 /* controller modes available */
565083e1
TH
4214 xfer_mask = ata_pack_xfermask(ap->pio_mask,
4215 ap->mwdma_mask, ap->udma_mask);
4216
8343f889 4217 /* drive modes available */
37deecb5
TH
4218 xfer_mask &= ata_pack_xfermask(dev->pio_mask,
4219 dev->mwdma_mask, dev->udma_mask);
4220 xfer_mask &= ata_id_xfermask(dev->id);
565083e1 4221
b352e57d
AC
4222 /*
4223 * CFA Advanced TrueIDE timings are not allowed on a shared
4224 * cable
4225 */
4226 if (ata_dev_pair(dev)) {
4227 /* No PIO5 or PIO6 */
4228 xfer_mask &= ~(0x03 << (ATA_SHIFT_PIO + 5));
4229 /* No MWDMA3 or MWDMA 4 */
4230 xfer_mask &= ~(0x03 << (ATA_SHIFT_MWDMA + 3));
4231 }
4232
37deecb5
TH
4233 if (ata_dma_blacklisted(dev)) {
4234 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
f15a1daf
TH
4235 ata_dev_printk(dev, KERN_WARNING,
4236 "device is on DMA blacklist, disabling DMA\n");
37deecb5 4237 }
a6d5a51c 4238
14d66ab7 4239 if ((host->flags & ATA_HOST_SIMPLEX) &&
2dcb407e 4240 host->simplex_claimed && host->simplex_claimed != ap) {
37deecb5
TH
4241 xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
4242 ata_dev_printk(dev, KERN_WARNING, "simplex DMA is claimed by "
4243 "other device, disabling DMA\n");
5444a6f4 4244 }
565083e1 4245
e424675f
JG
4246 if (ap->flags & ATA_FLAG_NO_IORDY)
4247 xfer_mask &= ata_pio_mask_no_iordy(dev);
4248
5444a6f4 4249 if (ap->ops->mode_filter)
a76b62ca 4250 xfer_mask = ap->ops->mode_filter(dev, xfer_mask);
5444a6f4 4251
8343f889
RH
4252 /* Apply cable rule here. Don't apply it early because when
4253 * we handle hot plug the cable type can itself change.
4254 * Check this last so that we know if the transfer rate was
4255 * solely limited by the cable.
4256 * Unknown or 80 wire cables reported host side are checked
4257 * drive side as well. Cases where we know a 40wire cable
4258 * is used safely for 80 are not checked here.
4259 */
4260 if (xfer_mask & (0xF8 << ATA_SHIFT_UDMA))
4261 /* UDMA/44 or higher would be available */
2dcb407e 4262 if ((ap->cbl == ATA_CBL_PATA40) ||
6bbfd53d 4263 (ata_is_40wire(dev) &&
2dcb407e
JG
4264 (ap->cbl == ATA_CBL_PATA_UNK ||
4265 ap->cbl == ATA_CBL_PATA80))) {
4266 ata_dev_printk(dev, KERN_WARNING,
8343f889
RH
4267 "limited to UDMA/33 due to 40-wire cable\n");
4268 xfer_mask &= ~(0xF8 << ATA_SHIFT_UDMA);
4269 }
4270
565083e1
TH
4271 ata_unpack_xfermask(xfer_mask, &dev->pio_mask,
4272 &dev->mwdma_mask, &dev->udma_mask);
1da177e4
LT
4273}
4274
1da177e4
LT
4275/**
4276 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
1da177e4
LT
4277 * @dev: Device to which command will be sent
4278 *
780a87f7
JG
4279 * Issue SET FEATURES - XFER MODE command to device @dev
4280 * on port @ap.
4281 *
1da177e4 4282 * LOCKING:
0cba632b 4283 * PCI/etc. bus probe sem.
83206a29
TH
4284 *
4285 * RETURNS:
4286 * 0 on success, AC_ERR_* mask otherwise.
1da177e4
LT
4287 */
4288
3373efd8 4289static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
1da177e4 4290{
a0123703 4291 struct ata_taskfile tf;
83206a29 4292 unsigned int err_mask;
1da177e4
LT
4293
4294 /* set up set-features taskfile */
4295 DPRINTK("set features - xfer mode\n");
4296
464cf177
TH
4297 /* Some controllers and ATAPI devices show flaky interrupt
4298 * behavior after setting xfer mode. Use polling instead.
4299 */
3373efd8 4300 ata_tf_init(dev, &tf);
a0123703
TH
4301 tf.command = ATA_CMD_SET_FEATURES;
4302 tf.feature = SETFEATURES_XFER;
464cf177 4303 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE | ATA_TFLAG_POLLING;
a0123703 4304 tf.protocol = ATA_PROT_NODATA;
b9f8ab2d 4305 /* If we are using IORDY we must send the mode setting command */
11b7becc
JG
4306 if (ata_pio_need_iordy(dev))
4307 tf.nsect = dev->xfer_mode;
b9f8ab2d
AC
4308 /* If the device has IORDY and the controller does not - turn it off */
4309 else if (ata_id_has_iordy(dev->id))
11b7becc 4310 tf.nsect = 0x01;
b9f8ab2d
AC
4311 else /* In the ancient relic department - skip all of this */
4312 return 0;
1da177e4 4313
2b789108 4314 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
9f45cbd3
KCA
4315
4316 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4317 return err_mask;
4318}
9f45cbd3 4319/**
218f3d30 4320 * ata_dev_set_feature - Issue SET FEATURES - SATA FEATURES
9f45cbd3
KCA
4321 * @dev: Device to which command will be sent
4322 * @enable: Whether to enable or disable the feature
218f3d30 4323 * @feature: The sector count represents the feature to set
9f45cbd3
KCA
4324 *
4325 * Issue SET FEATURES - SATA FEATURES command to device @dev
218f3d30 4326 * on port @ap with sector count
9f45cbd3
KCA
4327 *
4328 * LOCKING:
4329 * PCI/etc. bus probe sem.
4330 *
4331 * RETURNS:
4332 * 0 on success, AC_ERR_* mask otherwise.
4333 */
218f3d30
JG
4334static unsigned int ata_dev_set_feature(struct ata_device *dev, u8 enable,
4335 u8 feature)
9f45cbd3
KCA
4336{
4337 struct ata_taskfile tf;
4338 unsigned int err_mask;
4339
4340 /* set up set-features taskfile */
4341 DPRINTK("set features - SATA features\n");
4342
4343 ata_tf_init(dev, &tf);
4344 tf.command = ATA_CMD_SET_FEATURES;
4345 tf.feature = enable;
4346 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4347 tf.protocol = ATA_PROT_NODATA;
218f3d30 4348 tf.nsect = feature;
9f45cbd3 4349
2b789108 4350 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
1da177e4 4351
83206a29
TH
4352 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4353 return err_mask;
1da177e4
LT
4354}
4355
8bf62ece
AL
4356/**
4357 * ata_dev_init_params - Issue INIT DEV PARAMS command
8bf62ece 4358 * @dev: Device to which command will be sent
e2a7f77a
RD
4359 * @heads: Number of heads (taskfile parameter)
4360 * @sectors: Number of sectors (taskfile parameter)
8bf62ece
AL
4361 *
4362 * LOCKING:
6aff8f1f
TH
4363 * Kernel thread context (may sleep)
4364 *
4365 * RETURNS:
4366 * 0 on success, AC_ERR_* mask otherwise.
8bf62ece 4367 */
3373efd8
TH
4368static unsigned int ata_dev_init_params(struct ata_device *dev,
4369 u16 heads, u16 sectors)
8bf62ece 4370{
a0123703 4371 struct ata_taskfile tf;
6aff8f1f 4372 unsigned int err_mask;
8bf62ece
AL
4373
4374 /* Number of sectors per track 1-255. Number of heads 1-16 */
4375 if (sectors < 1 || sectors > 255 || heads < 1 || heads > 16)
00b6f5e9 4376 return AC_ERR_INVALID;
8bf62ece
AL
4377
4378 /* set up init dev params taskfile */
4379 DPRINTK("init dev params \n");
4380
3373efd8 4381 ata_tf_init(dev, &tf);
a0123703
TH
4382 tf.command = ATA_CMD_INIT_DEV_PARAMS;
4383 tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
4384 tf.protocol = ATA_PROT_NODATA;
4385 tf.nsect = sectors;
4386 tf.device |= (heads - 1) & 0x0f; /* max head = num. of heads - 1 */
8bf62ece 4387
2b789108 4388 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0);
18b2466c
AC
4389 /* A clean abort indicates an original or just out of spec drive
4390 and we should continue as we issue the setup based on the
4391 drive reported working geometry */
4392 if (err_mask == AC_ERR_DEV && (tf.feature & ATA_ABORTED))
4393 err_mask = 0;
8bf62ece 4394
6aff8f1f
TH
4395 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4396 return err_mask;
8bf62ece
AL
4397}
4398
1da177e4 4399/**
0cba632b
JG
4400 * ata_sg_clean - Unmap DMA memory associated with command
4401 * @qc: Command containing DMA memory to be released
4402 *
4403 * Unmap all mapped DMA memory associated with this command.
1da177e4
LT
4404 *
4405 * LOCKING:
cca3974e 4406 * spin_lock_irqsave(host lock)
1da177e4 4407 */
70e6ad0c 4408void ata_sg_clean(struct ata_queued_cmd *qc)
1da177e4
LT
4409{
4410 struct ata_port *ap = qc->ap;
cedc9a47 4411 struct scatterlist *sg = qc->__sg;
1da177e4 4412 int dir = qc->dma_dir;
cedc9a47 4413 void *pad_buf = NULL;
1da177e4 4414
a4631474
TH
4415 WARN_ON(!(qc->flags & ATA_QCFLAG_DMAMAP));
4416 WARN_ON(sg == NULL);
1da177e4
LT
4417
4418 if (qc->flags & ATA_QCFLAG_SINGLE)
f131883e 4419 WARN_ON(qc->n_elem > 1);
1da177e4 4420
2c13b7ce 4421 VPRINTK("unmapping %u sg elements\n", qc->n_elem);
1da177e4 4422
cedc9a47
JG
4423 /* if we padded the buffer out to 32-bit bound, and data
4424 * xfer direction is from-device, we must copy from the
4425 * pad buffer back into the supplied buffer
4426 */
4427 if (qc->pad_len && !(qc->tf.flags & ATA_TFLAG_WRITE))
4428 pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4429
4430 if (qc->flags & ATA_QCFLAG_SG) {
e1410f2d 4431 if (qc->n_elem)
2f1f610b 4432 dma_unmap_sg(ap->dev, sg, qc->n_elem, dir);
cedc9a47 4433 /* restore last sg */
87260216 4434 sg_last(sg, qc->orig_n_elem)->length += qc->pad_len;
cedc9a47
JG
4435 if (pad_buf) {
4436 struct scatterlist *psg = &qc->pad_sgent;
45711f1a 4437 void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
cedc9a47 4438 memcpy(addr + psg->offset, pad_buf, qc->pad_len);
dfa15988 4439 kunmap_atomic(addr, KM_IRQ0);
cedc9a47
JG
4440 }
4441 } else {
2e242fa9 4442 if (qc->n_elem)
2f1f610b 4443 dma_unmap_single(ap->dev,
e1410f2d
JG
4444 sg_dma_address(&sg[0]), sg_dma_len(&sg[0]),
4445 dir);
cedc9a47
JG
4446 /* restore sg */
4447 sg->length += qc->pad_len;
4448 if (pad_buf)
4449 memcpy(qc->buf_virt + sg->length - qc->pad_len,
4450 pad_buf, qc->pad_len);
4451 }
1da177e4
LT
4452
4453 qc->flags &= ~ATA_QCFLAG_DMAMAP;
cedc9a47 4454 qc->__sg = NULL;
1da177e4
LT
4455}
4456
4457/**
4458 * ata_fill_sg - Fill PCI IDE PRD table
4459 * @qc: Metadata associated with taskfile to be transferred
4460 *
780a87f7
JG
4461 * Fill PCI IDE PRD (scatter-gather) table with segments
4462 * associated with the current disk command.
4463 *
1da177e4 4464 * LOCKING:
cca3974e 4465 * spin_lock_irqsave(host lock)
1da177e4
LT
4466 *
4467 */
4468static void ata_fill_sg(struct ata_queued_cmd *qc)
4469{
1da177e4 4470 struct ata_port *ap = qc->ap;
cedc9a47
JG
4471 struct scatterlist *sg;
4472 unsigned int idx;
1da177e4 4473
a4631474 4474 WARN_ON(qc->__sg == NULL);
f131883e 4475 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
1da177e4
LT
4476
4477 idx = 0;
cedc9a47 4478 ata_for_each_sg(sg, qc) {
1da177e4
LT
4479 u32 addr, offset;
4480 u32 sg_len, len;
4481
4482 /* determine if physical DMA addr spans 64K boundary.
4483 * Note h/w doesn't support 64-bit, so we unconditionally
4484 * truncate dma_addr_t to u32.
4485 */
4486 addr = (u32) sg_dma_address(sg);
4487 sg_len = sg_dma_len(sg);
4488
4489 while (sg_len) {
4490 offset = addr & 0xffff;
4491 len = sg_len;
4492 if ((offset + sg_len) > 0x10000)
4493 len = 0x10000 - offset;
4494
4495 ap->prd[idx].addr = cpu_to_le32(addr);
4496 ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
4497 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4498
4499 idx++;
4500 sg_len -= len;
4501 addr += len;
4502 }
4503 }
4504
4505 if (idx)
4506 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4507}
b9a4197e 4508
d26fc955
AC
4509/**
4510 * ata_fill_sg_dumb - Fill PCI IDE PRD table
4511 * @qc: Metadata associated with taskfile to be transferred
4512 *
4513 * Fill PCI IDE PRD (scatter-gather) table with segments
4514 * associated with the current disk command. Perform the fill
4515 * so that we avoid writing any length 64K records for
4516 * controllers that don't follow the spec.
4517 *
4518 * LOCKING:
4519 * spin_lock_irqsave(host lock)
4520 *
4521 */
4522static void ata_fill_sg_dumb(struct ata_queued_cmd *qc)
4523{
4524 struct ata_port *ap = qc->ap;
4525 struct scatterlist *sg;
4526 unsigned int idx;
4527
4528 WARN_ON(qc->__sg == NULL);
4529 WARN_ON(qc->n_elem == 0 && qc->pad_len == 0);
4530
4531 idx = 0;
4532 ata_for_each_sg(sg, qc) {
4533 u32 addr, offset;
4534 u32 sg_len, len, blen;
4535
2dcb407e 4536 /* determine if physical DMA addr spans 64K boundary.
d26fc955
AC
4537 * Note h/w doesn't support 64-bit, so we unconditionally
4538 * truncate dma_addr_t to u32.
4539 */
4540 addr = (u32) sg_dma_address(sg);
4541 sg_len = sg_dma_len(sg);
4542
4543 while (sg_len) {
4544 offset = addr & 0xffff;
4545 len = sg_len;
4546 if ((offset + sg_len) > 0x10000)
4547 len = 0x10000 - offset;
4548
4549 blen = len & 0xffff;
4550 ap->prd[idx].addr = cpu_to_le32(addr);
4551 if (blen == 0) {
4552 /* Some PATA chipsets like the CS5530 can't
4553 cope with 0x0000 meaning 64K as the spec says */
4554 ap->prd[idx].flags_len = cpu_to_le32(0x8000);
4555 blen = 0x8000;
4556 ap->prd[++idx].addr = cpu_to_le32(addr + 0x8000);
4557 }
4558 ap->prd[idx].flags_len = cpu_to_le32(blen);
4559 VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);
4560
4561 idx++;
4562 sg_len -= len;
4563 addr += len;
4564 }
4565 }
4566
4567 if (idx)
4568 ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
4569}
4570
1da177e4
LT
4571/**
4572 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
4573 * @qc: Metadata associated with taskfile to check
4574 *
780a87f7
JG
4575 * Allow low-level driver to filter ATA PACKET commands, returning
4576 * a status indicating whether or not it is OK to use DMA for the
4577 * supplied PACKET command.
4578 *
1da177e4 4579 * LOCKING:
cca3974e 4580 * spin_lock_irqsave(host lock)
0cba632b 4581 *
1da177e4
LT
4582 * RETURNS: 0 when ATAPI DMA can be used
4583 * nonzero otherwise
4584 */
4585int ata_check_atapi_dma(struct ata_queued_cmd *qc)
4586{
4587 struct ata_port *ap = qc->ap;
b9a4197e
TH
4588
4589 /* Don't allow DMA if it isn't multiple of 16 bytes. Quite a
4590 * few ATAPI devices choke on such DMA requests.
4591 */
4592 if (unlikely(qc->nbytes & 15))
4593 return 1;
6f23a31d 4594
1da177e4 4595 if (ap->ops->check_atapi_dma)
b9a4197e 4596 return ap->ops->check_atapi_dma(qc);
1da177e4 4597
b9a4197e 4598 return 0;
1da177e4 4599}
b9a4197e 4600
140b5e59
TH
4601/**
4602 * atapi_qc_may_overflow - Check whether data transfer may overflow
4603 * @qc: ATA command in question
4604 *
4605 * ATAPI commands which transfer variable length data to host
4606 * might overflow due to application error or hardare bug. This
4607 * function checks whether overflow should be drained and ignored
4608 * for @qc.
4609 *
4610 * LOCKING:
4611 * None.
4612 *
4613 * RETURNS:
4614 * 1 if @qc may overflow; otherwise, 0.
4615 */
4616static int atapi_qc_may_overflow(struct ata_queued_cmd *qc)
4617{
4618 if (qc->tf.protocol != ATA_PROT_ATAPI &&
4619 qc->tf.protocol != ATA_PROT_ATAPI_DMA)
4620 return 0;
4621
4622 if (qc->tf.flags & ATA_TFLAG_WRITE)
4623 return 0;
4624
4625 switch (qc->cdb[0]) {
4626 case READ_10:
4627 case READ_12:
4628 case WRITE_10:
4629 case WRITE_12:
4630 case GPCMD_READ_CD:
4631 case GPCMD_READ_CD_MSF:
4632 return 0;
4633 }
4634
4635 return 1;
4636}
4637
31cc23b3
TH
4638/**
4639 * ata_std_qc_defer - Check whether a qc needs to be deferred
4640 * @qc: ATA command in question
4641 *
4642 * Non-NCQ commands cannot run with any other command, NCQ or
4643 * not. As upper layer only knows the queue depth, we are
4644 * responsible for maintaining exclusion. This function checks
4645 * whether a new command @qc can be issued.
4646 *
4647 * LOCKING:
4648 * spin_lock_irqsave(host lock)
4649 *
4650 * RETURNS:
4651 * ATA_DEFER_* if deferring is needed, 0 otherwise.
4652 */
4653int ata_std_qc_defer(struct ata_queued_cmd *qc)
4654{
4655 struct ata_link *link = qc->dev->link;
4656
4657 if (qc->tf.protocol == ATA_PROT_NCQ) {
4658 if (!ata_tag_valid(link->active_tag))
4659 return 0;
4660 } else {
4661 if (!ata_tag_valid(link->active_tag) && !link->sactive)
4662 return 0;
4663 }
4664
4665 return ATA_DEFER_LINK;
4666}
4667
1da177e4
LT
4668/**
4669 * ata_qc_prep - Prepare taskfile for submission
4670 * @qc: Metadata associated with taskfile to be prepared
4671 *
780a87f7
JG
4672 * Prepare ATA taskfile for submission.
4673 *
1da177e4 4674 * LOCKING:
cca3974e 4675 * spin_lock_irqsave(host lock)
1da177e4
LT
4676 */
4677void ata_qc_prep(struct ata_queued_cmd *qc)
4678{
4679 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4680 return;
4681
4682 ata_fill_sg(qc);
4683}
4684
d26fc955
AC
4685/**
4686 * ata_dumb_qc_prep - Prepare taskfile for submission
4687 * @qc: Metadata associated with taskfile to be prepared
4688 *
4689 * Prepare ATA taskfile for submission.
4690 *
4691 * LOCKING:
4692 * spin_lock_irqsave(host lock)
4693 */
4694void ata_dumb_qc_prep(struct ata_queued_cmd *qc)
4695{
4696 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
4697 return;
4698
4699 ata_fill_sg_dumb(qc);
4700}
4701
e46834cd
BK
/* no-op ->qc_prep for drivers that need no PRD setup at all */
void ata_noop_qc_prep(struct ata_queued_cmd *qc) { }
4703
0cba632b
JG
4704/**
4705 * ata_sg_init_one - Associate command with memory buffer
4706 * @qc: Command to be associated
4707 * @buf: Memory buffer
4708 * @buflen: Length of memory buffer, in bytes.
4709 *
4710 * Initialize the data-related elements of queued_cmd @qc
4711 * to point to a single memory buffer, @buf of byte length @buflen.
4712 *
4713 * LOCKING:
cca3974e 4714 * spin_lock_irqsave(host lock)
0cba632b
JG
4715 */
4716
1da177e4
LT
4717void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
4718{
1da177e4
LT
4719 qc->flags |= ATA_QCFLAG_SINGLE;
4720
cedc9a47 4721 qc->__sg = &qc->sgent;
1da177e4 4722 qc->n_elem = 1;
cedc9a47 4723 qc->orig_n_elem = 1;
1da177e4 4724 qc->buf_virt = buf;
233277ca 4725 qc->nbytes = buflen;
87260216 4726 qc->cursg = qc->__sg;
1da177e4 4727
61c0596c 4728 sg_init_one(&qc->sgent, buf, buflen);
1da177e4
LT
4729}
4730
0cba632b
JG
4731/**
4732 * ata_sg_init - Associate command with scatter-gather table.
4733 * @qc: Command to be associated
4734 * @sg: Scatter-gather table.
4735 * @n_elem: Number of elements in s/g table.
4736 *
4737 * Initialize the data-related elements of queued_cmd @qc
4738 * to point to a scatter-gather table @sg, containing @n_elem
4739 * elements.
4740 *
4741 * LOCKING:
cca3974e 4742 * spin_lock_irqsave(host lock)
0cba632b
JG
4743 */
4744
1da177e4
LT
4745void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
4746 unsigned int n_elem)
4747{
4748 qc->flags |= ATA_QCFLAG_SG;
cedc9a47 4749 qc->__sg = sg;
1da177e4 4750 qc->n_elem = n_elem;
cedc9a47 4751 qc->orig_n_elem = n_elem;
87260216 4752 qc->cursg = qc->__sg;
1da177e4
LT
4753}
4754
4755/**
0cba632b
JG
4756 * ata_sg_setup_one - DMA-map the memory buffer associated with a command.
4757 * @qc: Command with memory buffer to be mapped.
4758 *
4759 * DMA-map the memory buffer associated with queued_cmd @qc.
1da177e4
LT
4760 *
4761 * LOCKING:
cca3974e 4762 * spin_lock_irqsave(host lock)
1da177e4
LT
4763 *
4764 * RETURNS:
0cba632b 4765 * Zero on success, negative on error.
1da177e4
LT
4766 */
4767
4768static int ata_sg_setup_one(struct ata_queued_cmd *qc)
4769{
4770 struct ata_port *ap = qc->ap;
4771 int dir = qc->dma_dir;
cedc9a47 4772 struct scatterlist *sg = qc->__sg;
1da177e4 4773 dma_addr_t dma_address;
2e242fa9 4774 int trim_sg = 0;
1da177e4 4775
cedc9a47
JG
4776 /* we must lengthen transfers to end on a 32-bit boundary */
4777 qc->pad_len = sg->length & 3;
4778 if (qc->pad_len) {
4779 void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
4780 struct scatterlist *psg = &qc->pad_sgent;
4781
a4631474 4782 WARN_ON(qc->dev->class != ATA_DEV_ATAPI);
cedc9a47
JG
4783
4784 memset(pad_buf, 0, ATA_DMA_PAD_SZ);
4785
4786 if (qc->tf.flags & ATA_TFLAG_WRITE)
4787 memcpy(pad_buf, qc->buf_virt + sg->length - qc->pad_len,
4788 qc->pad_len);
4789
4790 sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
4791 sg_dma_len(psg) = ATA_DMA_PAD_SZ;
4792 /* trim sg */
4793 sg->length -= qc->pad_len;
2e242fa9
TH
4794 if (sg->length == 0)
4795 trim_sg = 1;
cedc9a47
JG
4796
4797 DPRINTK("padding done, sg->length=%u pad_len=%u\n",
4798 sg->length, qc->pad_len);
4799 }
4800
2e242fa9
TH
4801 if (trim_sg) {
4802 qc->n_elem--;
e1410f2d
JG
4803 goto skip_map;
4804 }
4805
2f1f610b 4806 dma_address = dma_map_single(ap->dev, qc->buf_virt,
32529e01 4807 sg->length, dir);
537a95d9
TH
4808 if (dma_mapping_error(dma_address)) {
4809 /* restore sg */
4810 sg->length += qc->pad_len;
1da177e4 4811 return -1;
537a95d9 4812 }
1da177e4
LT
4813
4814 sg_dma_address(sg) = dma_address;
32529e01 4815 sg_dma_len(sg) = sg->length;
1da177e4 4816
2e242fa9 4817skip_map:
1da177e4
LT
4818 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
4819 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
4820
4821 return 0;
4822}
4823
/**
 *	ata_sg_setup - DMA-map the scatter-gather table associated with a command.
 *	@qc: Command with scatter-gather table to be mapped.
 *
 *	DMA-map the scatter-gather table associated with queued_cmd @qc.
 *	If the last sg entry is not a multiple of 4 bytes long, a pad
 *	buffer is appended (ATAPI only) so the transfer ends on a 32-bit
 *	boundary; the last sg entry is trimmed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, negative on error.
 *
 */

static int ata_sg_setup(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->__sg;
	struct scatterlist *lsg = sg_last(qc->__sg, qc->n_elem);
	int n_elem, pre_n_elem, dir, trim_sg = 0;

	VPRINTK("ENTER, ata%u\n", ap->print_id);
	WARN_ON(!(qc->flags & ATA_QCFLAG_SG));

	/* we must lengthen transfers to end on a 32-bit boundary */
	qc->pad_len = lsg->length & 3;
	if (qc->pad_len) {
		/* per-tag slice of the port's pre-allocated pad area */
		void *pad_buf = ap->pad + (qc->tag * ATA_DMA_PAD_SZ);
		struct scatterlist *psg = &qc->pad_sgent;
		unsigned int offset;

		/* only ATAPI devices generate unaligned transfers */
		WARN_ON(qc->dev->class != ATA_DEV_ATAPI);

		memset(pad_buf, 0, ATA_DMA_PAD_SZ);

		/*
		 * psg->page/offset are used to copy to-be-written
		 * data in this function or read data in ata_sg_clean.
		 */
		offset = lsg->offset + lsg->length - qc->pad_len;
		sg_init_table(psg, 1);
		sg_set_page(psg, nth_page(sg_page(lsg), offset >> PAGE_SHIFT),
				qc->pad_len, offset_in_page(offset));

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* writes: copy the tail bytes into the pad buffer now */
			void *addr = kmap_atomic(sg_page(psg), KM_IRQ0);
			memcpy(pad_buf, addr + psg->offset, qc->pad_len);
			kunmap_atomic(addr, KM_IRQ0);
		}

		sg_dma_address(psg) = ap->pad_dma + (qc->tag * ATA_DMA_PAD_SZ);
		sg_dma_len(psg) = ATA_DMA_PAD_SZ;
		/* trim last sg */
		lsg->length -= qc->pad_len;
		if (lsg->length == 0)
			trim_sg = 1;

		DPRINTK("padding done, sg[%d].length=%u pad_len=%u\n",
			qc->n_elem - 1, lsg->length, qc->pad_len);
	}

	/* if trimming emptied the last sg entry, drop it from the map */
	pre_n_elem = qc->n_elem;
	if (trim_sg && pre_n_elem)
		pre_n_elem--;

	if (!pre_n_elem) {
		n_elem = 0;
		goto skip_map;
	}

	dir = qc->dma_dir;
	n_elem = dma_map_sg(ap->dev, sg, pre_n_elem, dir);
	if (n_elem < 1) {
		/* restore last sg so the caller sees the table unmodified */
		lsg->length += qc->pad_len;
		return -1;
	}

	DPRINTK("%d sg elements mapped\n", n_elem);

skip_map:
	qc->n_elem = n_elem;

	return 0;
}
4909
0baab86b 4910/**
c893a3ae 4911 * swap_buf_le16 - swap halves of 16-bit words in place
0baab86b
EF
4912 * @buf: Buffer to swap
4913 * @buf_words: Number of 16-bit words in buffer.
4914 *
4915 * Swap halves of 16-bit words if needed to convert from
4916 * little-endian byte order to native cpu byte order, or
4917 * vice-versa.
4918 *
4919 * LOCKING:
6f0ef4fa 4920 * Inherited from caller.
0baab86b 4921 */
1da177e4
LT
4922void swap_buf_le16(u16 *buf, unsigned int buf_words)
4923{
4924#ifdef __BIG_ENDIAN
4925 unsigned int i;
4926
4927 for (i = 0; i < buf_words; i++)
4928 buf[i] = le16_to_cpu(buf[i]);
4929#endif /* __BIG_ENDIAN */
4930}
4931
/**
 *	ata_data_xfer - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.
 *	The bulk of the buffer is moved as 16-bit words; an odd
 *	trailing byte is handled through a one-word bounce buffer
 *	since the data register is 16 bits wide.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer(struct ata_device *adev, unsigned char *buf,
		   unsigned int buflen, int write_data)
{
	struct ata_port *ap = adev->link->ap;
	unsigned int words = buflen >> 1;

	/* Transfer multiple of 2 bytes */
	if (write_data)
		iowrite16_rep(ap->ioaddr.data_addr, buf, words);
	else
		ioread16_rep(ap->ioaddr.data_addr, buf, words);

	/* Transfer trailing 1 byte, if any. */
	if (unlikely(buflen & 0x01)) {
		u16 align_buf[1] = { 0 };
		unsigned char *trailing_buf = buf + buflen - 1;

		if (write_data) {
			/* pad the last byte with zero and send a full word */
			memcpy(align_buf, trailing_buf, 1);
			iowrite16(le16_to_cpu(align_buf[0]), ap->ioaddr.data_addr);
		} else {
			/* read a full word but keep only the first byte */
			align_buf[0] = cpu_to_le16(ioread16(ap->ioaddr.data_addr));
			memcpy(trailing_buf, align_buf, 1);
		}
	}
}
4970
/**
 *	ata_data_xfer_noirq - Transfer data by PIO
 *	@adev: device to target
 *	@buf: data buffer
 *	@buflen: buffer length
 *	@write_data: read/write
 *
 *	Transfer data from/to the device data register by PIO.  Do the
 *	transfer with interrupts disabled.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_data_xfer_noirq(struct ata_device *adev, unsigned char *buf,
			 unsigned int buflen, int write_data)
{
	unsigned long irq_state;

	/* shield the PIO loop from local interrupts */
	local_irq_save(irq_state);
	ata_data_xfer(adev, buf, buflen, write_data);
	local_irq_restore(irq_state);
}
4992
4993
/**
 *	ata_pio_sector - Transfer a sector of data.
 *	@qc: Command on going
 *
 *	Transfer qc->sect_size bytes of data from/to the ATA device,
 *	advancing the command's scatter-gather cursor afterwards.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sector(struct ata_queued_cmd *qc)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct page *page;
	unsigned int offset;
	unsigned char *buf;

	/* last sector of the command -> transition the HSM */
	if (qc->curbytes == qc->nbytes - qc->sect_size)
		ap->hsm_task_state = HSM_ST_LAST;

	page = sg_page(qc->cursg);
	offset = qc->cursg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use a bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		/* lowmem page: directly addressable, no mapping needed */
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, qc->sect_size, do_write);
	}

	qc->curbytes += qc->sect_size;
	qc->cursg_ofs += qc->sect_size;

	/* current sg entry exhausted -> move to the next one */
	if (qc->cursg_ofs == qc->cursg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}
}
1da177e4 5049
/**
 *	ata_pio_sectors - Transfer one or many sectors.
 *	@qc: Command on going
 *
 *	Transfer one or many sectors of data from/to the
 *	ATA device for the DRQ request.  For READ/WRITE MULTIPLE
 *	commands up to dev->multi_count sectors are moved per DRQ.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void ata_pio_sectors(struct ata_queued_cmd *qc)
{
	if (is_multi_taskfile(&qc->tf)) {
		/* READ/WRITE MULTIPLE */
		unsigned int nsect;

		WARN_ON(qc->dev->multi_count == 0);

		/* remaining sectors, capped at the device's multi count */
		nsect = min((qc->nbytes - qc->curbytes) / qc->sect_size,
			    qc->dev->multi_count);
		while (nsect--)
			ata_pio_sector(qc);
	} else
		ata_pio_sector(qc);

	ata_altstatus(qc->ap); /* flush */
}
5078
/**
 *	atapi_send_cdb - Write CDB bytes to hardware
 *	@ap: Port to which ATAPI device is attached.
 *	@qc: Taskfile currently active
 *
 *	When device has indicated its readiness to accept
 *	a CDB, this function is called.  Send the CDB, then move
 *	the HSM to the state appropriate for the command's protocol.
 *
 *	LOCKING:
 *	caller.
 */

static void atapi_send_cdb(struct ata_port *ap, struct ata_queued_cmd *qc)
{
	/* send SCSI cdb */
	DPRINTK("send cdb\n");
	WARN_ON(qc->dev->cdb_len < 12);

	ap->ops->data_xfer(qc->dev, qc->cdb, qc->dev->cdb_len, 1);
	ata_altstatus(ap); /* flush */

	switch (qc->tf.protocol) {
	case ATA_PROT_ATAPI:
		/* PIO data phase follows */
		ap->hsm_task_state = HSM_ST;
		break;
	case ATA_PROT_ATAPI_NODATA:
		/* no data phase; wait for command completion */
		ap->hsm_task_state = HSM_ST_LAST;
		break;
	case ATA_PROT_ATAPI_DMA:
		ap->hsm_task_state = HSM_ST_LAST;
		/* initiate bmdma */
		ap->ops->bmdma_start(qc);
		break;
	}
}
5114
/**
 *	__atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *	@bytes: number of bytes
 *
 *	Transfer data from/to the ATAPI device, walking the command's
 *	scatter-gather list.  If the device requests more data than the
 *	sg list holds, the excess is drained (read) or zero-padded
 *	(write), up to ATAPI_MAX_DRAIN.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	Zero on success, -1 if the device asked for too much trailing data.
 */
static int __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
{
	int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
	struct ata_port *ap = qc->ap;
	struct ata_eh_info *ehi = &qc->dev->link->eh_info;
	struct scatterlist *sg;
	struct page *page;
	unsigned char *buf;
	unsigned int offset, count;

next_sg:
	sg = qc->cursg;
	if (unlikely(!sg)) {
		/*
		 * The end of qc->sg is reached and the device expects
		 * more data to transfer. In order not to overrun qc->sg
		 * and fulfill length specified in the byte count register,
		 * - for read case, discard trailing data from the device
		 * - for write case, padding zero data to the device
		 */
		u16 pad_buf[1] = { 0 };
		unsigned int i;

		if (bytes > qc->curbytes - qc->nbytes + ATAPI_MAX_DRAIN) {
			ata_ehi_push_desc(ehi, "too much trailing data "
					  "buf=%u cur=%u bytes=%u",
					  qc->nbytes, qc->curbytes, bytes);
			return -1;
		}

		/* overflow is expected for misc ATAPI commands */
		if (bytes && !atapi_qc_may_overflow(qc))
			ata_dev_printk(qc->dev, KERN_WARNING, "ATAPI %u bytes "
				       "trailing data (cdb=%02x nbytes=%u)\n",
				       bytes, qc->cdb[0], qc->nbytes);

		/* drain/pad one 16-bit word at a time through the bounce word */
		for (i = 0; i < (bytes + 1) / 2; i++)
			ap->ops->data_xfer(qc->dev, (unsigned char *)pad_buf, 2, do_write);

		qc->curbytes += bytes;

		return 0;
	}

	page = sg_page(sg);
	offset = sg->offset + qc->cursg_ofs;

	/* get the current page and offset */
	page = nth_page(page, (offset >> PAGE_SHIFT));
	offset %= PAGE_SIZE;

	/* don't overrun current sg */
	count = min(sg->length - qc->cursg_ofs, bytes);

	/* don't cross page boundaries */
	count = min(count, (unsigned int)PAGE_SIZE - offset);

	DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");

	if (PageHighMem(page)) {
		unsigned long flags;

		/* FIXME: use bounce buffer */
		local_irq_save(flags);
		buf = kmap_atomic(page, KM_IRQ0);

		/* do the actual data transfer */
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);

		kunmap_atomic(buf, KM_IRQ0);
		local_irq_restore(flags);
	} else {
		buf = page_address(page);
		ap->ops->data_xfer(qc->dev, buf + offset, count, do_write);
	}

	bytes -= count;
	/* an odd-sized chunk mid-transfer consumes one extra device byte */
	if ((count & 1) && bytes)
		bytes--;
	qc->curbytes += count;
	qc->cursg_ofs += count;

	if (qc->cursg_ofs == sg->length) {
		qc->cursg = sg_next(qc->cursg);
		qc->cursg_ofs = 0;
	}

	if (bytes)
		goto next_sg;

	return 0;
}
5218
/**
 *	atapi_pio_bytes - Transfer data from/to the ATAPI device.
 *	@qc: Command on going
 *
 *	Transfer data from/to the ATAPI device.  Reads the byte count
 *	and interrupt-reason registers, validates the transfer phase,
 *	then hands off to __atapi_pio_bytes().  On any protocol
 *	violation the HSM is moved to HSM_ST_ERR.
 *
 *	LOCKING:
 *	Inherited from caller.
 */

static void atapi_pio_bytes(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_device *dev = qc->dev;
	unsigned int ireason, bc_lo, bc_hi, bytes;
	int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;

	/* Abuse qc->result_tf for temp storage of intermediate TF
	 * here to save some kernel stack usage.
	 * For normal completion, qc->result_tf is not relevant. For
	 * error, qc->result_tf is later overwritten by ata_qc_complete().
	 * So, the correctness of qc->result_tf is not affected.
	 */
	ap->ops->tf_read(ap, &qc->result_tf);
	ireason = qc->result_tf.nsect;
	bc_lo = qc->result_tf.lbam;
	bc_hi = qc->result_tf.lbah;
	bytes = (bc_hi << 8) | bc_lo;

	/* shall be cleared to zero, indicating xfer of data */
	if (ireason & (1 << 0))
		goto err_out;

	/* make sure transfer direction matches expected */
	i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
	if (do_write != i_write)
		goto err_out;

	VPRINTK("ata%u: xfering %d bytes\n", ap->print_id, bytes);

	if (__atapi_pio_bytes(qc, bytes))
		goto err_out;
	ata_altstatus(ap); /* flush */

	return;

err_out:
	ata_dev_printk(dev, KERN_INFO, "ATAPI check failed\n");
	qc->err_mask |= AC_ERR_HSM;
	ap->hsm_task_state = HSM_ST_ERR;
}
5270
5271/**
c234fb00
AL
5272 * ata_hsm_ok_in_wq - Check if the qc can be handled in the workqueue.
5273 * @ap: the target ata_port
5274 * @qc: qc on going
1da177e4 5275 *
c234fb00
AL
5276 * RETURNS:
5277 * 1 if ok in workqueue, 0 otherwise.
1da177e4 5278 */
c234fb00
AL
5279
5280static inline int ata_hsm_ok_in_wq(struct ata_port *ap, struct ata_queued_cmd *qc)
1da177e4 5281{
c234fb00
AL
5282 if (qc->tf.flags & ATA_TFLAG_POLLING)
5283 return 1;
1da177e4 5284
c234fb00
AL
5285 if (ap->hsm_task_state == HSM_ST_FIRST) {
5286 if (qc->tf.protocol == ATA_PROT_PIO &&
5287 (qc->tf.flags & ATA_TFLAG_WRITE))
5288 return 1;
1da177e4 5289
405e66b3 5290 if (ata_is_atapi(qc->tf.protocol) &&
c234fb00
AL
5291 !(qc->dev->flags & ATA_DFLAG_CDB_INTR))
5292 return 1;
fe79e683
AL
5293 }
5294
c234fb00
AL
5295 return 0;
5296}
1da177e4 5297
/**
 *	ata_hsm_qc_complete - finish a qc running on standard HSM
 *	@qc: Command to complete
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Finish @qc which is running on standard HSM.  With new-style EH,
 *	an HSM error freezes the port instead of completing the qc.
 *
 *	LOCKING:
 *	If @in_wq is zero, spin_lock_irqsave(host lock).
 *	Otherwise, none on entry and grabs host lock.
 */
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
	struct ata_port *ap = qc->ap;
	unsigned long flags;

	if (ap->ops->error_handler) {
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);

			/* EH might have kicked in while host lock is
			 * released.
			 */
			qc = ata_qc_from_tag(ap, qc->tag);
			if (qc) {
				if (likely(!(qc->err_mask & AC_ERR_HSM))) {
					/* re-enable the IRQ the issue path masked */
					ap->ops->irq_on(ap);
					ata_qc_complete(qc);
				} else
					ata_port_freeze(ap);
			}

			spin_unlock_irqrestore(ap->lock, flags);
		} else {
			if (likely(!(qc->err_mask & AC_ERR_HSM)))
				ata_qc_complete(qc);
			else
				ata_port_freeze(ap);
		}
	} else {
		/* old-style EH: always complete; EH sorts errors out later */
		if (in_wq) {
			spin_lock_irqsave(ap->lock, flags);
			ap->ops->irq_on(ap);
			ata_qc_complete(qc);
			spin_unlock_irqrestore(ap->lock, flags);
		} else
			ata_qc_complete(qc);
	}
}
5347
/**
 *	ata_hsm_move - move the HSM to the next state.
 *	@ap: the target ata_port
 *	@qc: qc on going
 *	@status: current device status
 *	@in_wq: 1 if called from workqueue, 0 otherwise
 *
 *	Drive the taskfile host state machine one or more steps,
 *	looping internally (via fsm_start) whenever a step changes
 *	hsm_task_state and the new state can be handled immediately.
 *
 *	RETURNS:
 *	1 when poll next status needed, 0 otherwise.
 */
int ata_hsm_move(struct ata_port *ap, struct ata_queued_cmd *qc,
		 u8 status, int in_wq)
{
	unsigned long flags = 0;
	int poll_next;

	WARN_ON((qc->flags & ATA_QCFLAG_ACTIVE) == 0);

	/* Make sure ata_qc_issue_prot() does not throw things
	 * like DMA polling into the workqueue. Notice that
	 * in_wq is not equivalent to (qc->tf.flags & ATA_TFLAG_POLLING).
	 */
	WARN_ON(in_wq != ata_hsm_ok_in_wq(ap, qc));

fsm_start:
	DPRINTK("ata%u: protocol %d task_state %d (dev_stat 0x%X)\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state, status);

	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Send first data block or PACKET CDB */

		/* If polling, we will stay in the work queue after
		 * sending the data. Otherwise, interrupt handler
		 * takes over after sending the data.
		 */
		poll_next = (qc->tf.flags & ATA_TFLAG_POLLING);

		/* check device status */
		if (unlikely((status & ATA_DRQ) == 0)) {
			/* handle BSY=0, DRQ=0 as error */
			if (likely(status & (ATA_ERR | ATA_DF)))
				/* device stops HSM for abort/error */
				qc->err_mask |= AC_ERR_DEV;
			else
				/* HSM violation. Let EH handle this */
				qc->err_mask |= AC_ERR_HSM;

			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* Device should not ask for data transfer (DRQ=1)
		 * when it finds something wrong.
		 * We ignore DRQ here and stop the HSM by
		 * changing hsm_task_state to HSM_ST_ERR and
		 * let the EH abort the command or reset the device.
		 */
		if (unlikely(status & (ATA_ERR | ATA_DF))) {
			/* Some ATAPI tape drives forget to clear the ERR bit
			 * when doing the next command (mostly request sense).
			 * We ignore ERR here to workaround and proceed sending
			 * the CDB.
			 */
			if (!(qc->dev->horkage & ATA_HORKAGE_STUCK_ERR)) {
				ata_port_printk(ap, KERN_WARNING,
						"DRQ=1 with device error, "
						"dev_stat 0x%X\n", status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}
		}

		/* Send the CDB (atapi) or the first data block (ata pio out).
		 * During the state transition, interrupt handler shouldn't
		 * be invoked before the data transfer is complete and
		 * hsm_task_state is changed. Hence, the following locking.
		 */
		if (in_wq)
			spin_lock_irqsave(ap->lock, flags);

		if (qc->tf.protocol == ATA_PROT_PIO) {
			/* PIO data out protocol.
			 * send first data block.
			 */

			/* ata_pio_sectors() might change the state
			 * to HSM_ST_LAST. so, the state is changed here
			 * before ata_pio_sectors().
			 */
			ap->hsm_task_state = HSM_ST;
			ata_pio_sectors(qc);
		} else
			/* send CDB */
			atapi_send_cdb(ap, qc);

		if (in_wq)
			spin_unlock_irqrestore(ap->lock, flags);

		/* if polling, ata_pio_task() handles the rest.
		 * otherwise, interrupt handler takes over from here.
		 */
		break;

	case HSM_ST:
		/* complete command or read/write the data register */
		if (qc->tf.protocol == ATA_PROT_ATAPI) {
			/* ATAPI PIO protocol */
			if ((status & ATA_DRQ) == 0) {
				/* No more data to transfer or device error.
				 * Device error will be tagged in HSM_ST_LAST.
				 */
				ap->hsm_task_state = HSM_ST_LAST;
				goto fsm_start;
			}

			/* Device should not ask for data transfer (DRQ=1)
			 * when it finds something wrong.
			 * We ignore DRQ here and stop the HSM by
			 * changing hsm_task_state to HSM_ST_ERR and
			 * let the EH abort the command or reset the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				ata_port_printk(ap, KERN_WARNING, "DRQ=1 with "
						"device error, dev_stat 0x%X\n",
						status);
				qc->err_mask |= AC_ERR_HSM;
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			atapi_pio_bytes(qc);

			if (unlikely(ap->hsm_task_state == HSM_ST_ERR))
				/* bad ireason reported by device */
				goto fsm_start;

		} else {
			/* ATA PIO protocol */
			if (unlikely((status & ATA_DRQ) == 0)) {
				/* handle BSY=0, DRQ=0 as error */
				if (likely(status & (ATA_ERR | ATA_DF)))
					/* device stops HSM for abort/error */
					qc->err_mask |= AC_ERR_DEV;
				else
					/* HSM violation. Let EH handle this.
					 * Phantom devices also trigger this
					 * condition. Mark hint.
					 */
					qc->err_mask |= AC_ERR_HSM |
							AC_ERR_NODEV_HINT;

				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			/* For PIO reads, some devices may ask for
			 * data transfer (DRQ=1) along with ERR=1.
			 * We respect DRQ here and transfer one
			 * block of junk data before changing the
			 * hsm_task_state to HSM_ST_ERR.
			 *
			 * For PIO writes, ERR=1 DRQ=1 doesn't make
			 * sense since the data block has been
			 * transferred to the device.
			 */
			if (unlikely(status & (ATA_ERR | ATA_DF))) {
				/* data might be corrupted */
				qc->err_mask |= AC_ERR_DEV;

				if (!(qc->tf.flags & ATA_TFLAG_WRITE)) {
					ata_pio_sectors(qc);
					status = ata_wait_idle(ap);
				}

				if (status & (ATA_BUSY | ATA_DRQ))
					qc->err_mask |= AC_ERR_HSM;

				/* ata_pio_sectors() might change the
				 * state to HSM_ST_LAST. so, the state
				 * is changed after ata_pio_sectors().
				 */
				ap->hsm_task_state = HSM_ST_ERR;
				goto fsm_start;
			}

			ata_pio_sectors(qc);

			if (ap->hsm_task_state == HSM_ST_LAST &&
			    (!(qc->tf.flags & ATA_TFLAG_WRITE))) {
				/* all data read */
				status = ata_wait_idle(ap);
				goto fsm_start;
			}
		}

		poll_next = 1;
		break;

	case HSM_ST_LAST:
		if (unlikely(!ata_ok(status))) {
			qc->err_mask |= __ac_err_mask(status);
			ap->hsm_task_state = HSM_ST_ERR;
			goto fsm_start;
		}

		/* no more data to transfer */
		DPRINTK("ata%u: dev %u command complete, drv_stat 0x%x\n",
			ap->print_id, qc->dev->devno, status);

		WARN_ON(qc->err_mask);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;

	case HSM_ST_ERR:
		/* make sure qc->err_mask is available to
		 * know what's wrong and recover
		 */
		WARN_ON(qc->err_mask == 0);

		ap->hsm_task_state = HSM_ST_IDLE;

		/* complete taskfile transaction */
		ata_hsm_qc_complete(qc, in_wq);

		poll_next = 0;
		break;
	default:
		poll_next = 0;
		BUG();
	}

	return poll_next;
}
5589
/* ata_pio_task - workqueue entry that polls BSY and advances the HSM.
 * Re-queues itself (delayed) if the device stays busy, and loops while
 * ata_hsm_move() reports that further polling is needed.
 */
static void ata_pio_task(struct work_struct *work)
{
	struct ata_port *ap =
		container_of(work, struct ata_port, port_task.work);
	struct ata_queued_cmd *qc = ap->port_task_data;
	u8 status;
	int poll_next;

fsm_start:
	WARN_ON(ap->hsm_task_state == HSM_ST_IDLE);

	/*
	 * This is purely heuristic. This is a fast path.
	 * Sometimes when we enter, BSY will be cleared in
	 * a chk-status or two. If not, the drive is probably seeking
	 * or something. Snooze for a couple msecs, then
	 * chk-status again. If still busy, queue delayed work.
	 */
	status = ata_busy_wait(ap, ATA_BUSY, 5);
	if (status & ATA_BUSY) {
		msleep(2);
		status = ata_busy_wait(ap, ATA_BUSY, 10);
		if (status & ATA_BUSY) {
			/* still busy: back off and let the workqueue retry */
			ata_port_queue_task(ap, ata_pio_task, qc, ATA_SHORT_PAUSE);
			return;
		}
	}

	/* move the HSM */
	poll_next = ata_hsm_move(ap, qc, status, 1);

	/* another command or interrupt handler
	 * may be running at this point.
	 */
	if (poll_next)
		goto fsm_start;
}
5627
1da177e4
LT
5628/**
5629 * ata_qc_new - Request an available ATA command, for queueing
5630 * @ap: Port associated with device @dev
5631 * @dev: Device from whom we request an available command structure
5632 *
5633 * LOCKING:
0cba632b 5634 * None.
1da177e4
LT
5635 */
5636
5637static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
5638{
5639 struct ata_queued_cmd *qc = NULL;
5640 unsigned int i;
5641
e3180499 5642 /* no command while frozen */
b51e9e5d 5643 if (unlikely(ap->pflags & ATA_PFLAG_FROZEN))
e3180499
TH
5644 return NULL;
5645
2ab7db1f
TH
5646 /* the last tag is reserved for internal command. */
5647 for (i = 0; i < ATA_MAX_QUEUE - 1; i++)
6cec4a39 5648 if (!test_and_set_bit(i, &ap->qc_allocated)) {
f69499f4 5649 qc = __ata_qc_from_tag(ap, i);
1da177e4
LT
5650 break;
5651 }
5652
5653 if (qc)
5654 qc->tag = i;
5655
5656 return qc;
5657}
5658
5659/**
5660 * ata_qc_new_init - Request an available ATA command, and initialize it
1da177e4
LT
5661 * @dev: Device from whom we request an available command structure
5662 *
5663 * LOCKING:
0cba632b 5664 * None.
1da177e4
LT
5665 */
5666
3373efd8 5667struct ata_queued_cmd *ata_qc_new_init(struct ata_device *dev)
1da177e4 5668{
9af5c9c9 5669 struct ata_port *ap = dev->link->ap;
1da177e4
LT
5670 struct ata_queued_cmd *qc;
5671
5672 qc = ata_qc_new(ap);
5673 if (qc) {
1da177e4
LT
5674 qc->scsicmd = NULL;
5675 qc->ap = ap;
5676 qc->dev = dev;
1da177e4 5677
2c13b7ce 5678 ata_qc_reinit(qc);
1da177e4
LT
5679 }
5680
5681 return qc;
5682}
5683
1da177e4
LT
5684/**
5685 * ata_qc_free - free unused ata_queued_cmd
5686 * @qc: Command to complete
5687 *
5688 * Designed to free unused ata_queued_cmd object
5689 * in case something prevents using it.
5690 *
5691 * LOCKING:
cca3974e 5692 * spin_lock_irqsave(host lock)
1da177e4
LT
5693 */
5694void ata_qc_free(struct ata_queued_cmd *qc)
5695{
4ba946e9
TH
5696 struct ata_port *ap = qc->ap;
5697 unsigned int tag;
5698
a4631474 5699 WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
1da177e4 5700
4ba946e9
TH
5701 qc->flags = 0;
5702 tag = qc->tag;
5703 if (likely(ata_tag_valid(tag))) {
4ba946e9 5704 qc->tag = ATA_TAG_POISON;
6cec4a39 5705 clear_bit(tag, &ap->qc_allocated);
4ba946e9 5706 }
1da177e4
LT
5707}
5708
/* __ata_qc_complete - low-level qc completion: unmap DMA, clear the
 * qc's active-tag/sactive bookkeeping on its link and port, then invoke
 * the qc's completion callback.  Callers hold the host lock.
 */
void __ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;

	WARN_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
	WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));

	if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
		ata_sg_clean(qc);

	/* command should be marked inactive atomically with qc completion */
	if (qc->tf.protocol == ATA_PROT_NCQ) {
		link->sactive &= ~(1 << qc->tag);
		if (!link->sactive)
			ap->nr_active_links--;
	} else {
		link->active_tag = ATA_TAG_POISON;
		ap->nr_active_links--;
	}

	/* clear exclusive status */
	if (unlikely(qc->flags & ATA_QCFLAG_CLEAR_EXCL &&
		     ap->excl_link == link))
		ap->excl_link = NULL;

	/* atapi: mark qc as inactive to prevent the interrupt handler
	 * from completing the command twice later, before the error handler
	 * is called. (when rc != 0 and atapi request sense is needed)
	 */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	ap->qc_active &= ~(1 << qc->tag);

	/* call completion callback */
	qc->complete_fn(qc);
}
5745
39599a53
TH
5746static void fill_result_tf(struct ata_queued_cmd *qc)
5747{
5748 struct ata_port *ap = qc->ap;
5749
39599a53 5750 qc->result_tf.flags = qc->tf.flags;
4742d54f 5751 ap->ops->tf_read(ap, &qc->result_tf);
39599a53
TH
5752}
5753
00115e0f
TH
5754static void ata_verify_xfer(struct ata_queued_cmd *qc)
5755{
5756 struct ata_device *dev = qc->dev;
5757
5758 if (ata_tag_internal(qc->tag))
5759 return;
5760
5761 if (ata_is_nodata(qc->tf.protocol))
5762 return;
5763
5764 if ((dev->mwdma_mask || dev->udma_mask) && ata_is_pio(qc->tf.protocol))
5765 return;
5766
5767 dev->flags &= ~ATA_DFLAG_DUBIOUS_XFER;
5768}
5769
/**
 *	ata_qc_complete - Complete an active ATA command
 *	@qc: Command to complete
 *
 *	Indicate to the mid and upper layers that an ATA
 *	command has completed, with either an ok or not-ok status.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_complete(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* XXX: New EH and old EH use different mechanisms to
	 * synchronize EH with regular execution path.
	 *
	 * In new EH, a failed qc is marked with ATA_QCFLAG_FAILED.
	 * Normal execution path is responsible for not accessing a
	 * failed qc. libata core enforces the rule by returning NULL
	 * from ata_qc_from_tag() for failed qcs.
	 *
	 * Old EH depends on ata_qc_complete() nullifying completion
	 * requests if ATA_QCFLAG_EH_SCHEDULED is set. Old EH does
	 * not synchronize with interrupt handler. Only PIO task is
	 * taken care of.
	 */
	if (ap->ops->error_handler) {
		struct ata_device *dev = qc->dev;
		struct ata_eh_info *ehi = &dev->link->eh_info;

		WARN_ON(ap->pflags & ATA_PFLAG_FROZEN);

		if (unlikely(qc->err_mask))
			qc->flags |= ATA_QCFLAG_FAILED;

		if (unlikely(qc->flags & ATA_QCFLAG_FAILED)) {
			if (!ata_tag_internal(qc->tag)) {
				/* always fill result TF for failed qc */
				fill_result_tf(qc);
				ata_qc_schedule_eh(qc);
				return;
			}
		}

		/* read result TF if requested */
		if (qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		/* Some commands need post-processing after successful
		 * completion.
		 */
		switch (qc->tf.command) {
		case ATA_CMD_SET_FEATURES:
			if (qc->tf.feature != SETFEATURES_WC_ON &&
			    qc->tf.feature != SETFEATURES_WC_OFF)
				break;
			/* fall through */
		case ATA_CMD_INIT_DEV_PARAMS: /* CHS translation changed */
		case ATA_CMD_SET_MULTI: /* multi_count changed */
			/* revalidate device */
			ehi->dev_action[dev->devno] |= ATA_EH_REVALIDATE;
			ata_port_schedule_eh(ap);
			break;

		case ATA_CMD_SLEEP:
			dev->flags |= ATA_DFLAG_SLEEPING;
			break;
		}

		if (unlikely(dev->flags & ATA_DFLAG_DUBIOUS_XFER))
			ata_verify_xfer(qc);

		__ata_qc_complete(qc);
	} else {
		if (qc->flags & ATA_QCFLAG_EH_SCHEDULED)
			return;

		/* read result TF if failed or requested */
		if (qc->err_mask || qc->flags & ATA_QCFLAG_RESULT_TF)
			fill_result_tf(qc);

		__ata_qc_complete(qc);
	}
}
5856
dedaf2b0
TH
/**
 *	ata_qc_complete_multiple - Complete multiple qcs successfully
 *	@ap: port in question
 *	@qc_active: new qc_active mask
 *	@finish_qc: LLDD callback invoked before completing a qc
 *
 *	Complete in-flight commands.  This function is meant to be
 *	called from low-level driver's interrupt routine to complete
 *	requests normally.  ap->qc_active and @qc_active is compared
 *	and commands are completed accordingly.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Number of completed commands on success, -errno otherwise.
 */
int ata_qc_complete_multiple(struct ata_port *ap, u32 qc_active,
			     void (*finish_qc)(struct ata_queued_cmd *))
{
	int nr_done = 0;
	u32 done_mask;
	int i;

	/* bits that differ between the old and new masks are the
	 * tags whose state changed since the last update
	 */
	done_mask = ap->qc_active ^ qc_active;

	/* a changed bit that is set in the NEW mask would mean a tag
	 * became active without us issuing it -- reject the update
	 */
	if (unlikely(done_mask & qc_active)) {
		ata_port_printk(ap, KERN_ERR, "illegal qc_active transition "
				"(%08x->%08x)\n", ap->qc_active, qc_active);
		return -EINVAL;
	}

	for (i = 0; i < ATA_MAX_QUEUE; i++) {
		struct ata_queued_cmd *qc;

		if (!(done_mask & (1 << i)))
			continue;

		if ((qc = ata_qc_from_tag(ap, i))) {
			/* give the LLDD a chance to post-process before
			 * the qc is torn down
			 */
			if (finish_qc)
				finish_qc(qc);
			ata_qc_complete(qc);
			nr_done++;
		}
	}

	return nr_done;
}
5905
1da177e4
LT
/**
 *	ata_qc_issue - issue taskfile to device
 *	@qc: command to issue to device
 *
 *	Prepare an ATA command to submission to device.
 *	This includes mapping the data into a DMA-able
 *	area, filling in the S/G table, and finally
 *	writing the taskfile to hardware, starting the command.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 */
void ata_qc_issue(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct ata_link *link = qc->dev->link;
	u8 prot = qc->tf.protocol;

	/* Make sure only one non-NCQ command is outstanding.  The
	 * check is skipped for old EH because it reuses active qc to
	 * request ATAPI sense.
	 */
	WARN_ON(ap->ops->error_handler && ata_tag_valid(link->active_tag));

	if (prot == ATA_PROT_NCQ) {
		WARN_ON(link->sactive & (1 << qc->tag));

		/* first NCQ command on this link makes the link active */
		if (!link->sactive)
			ap->nr_active_links++;
		link->sactive |= 1 << qc->tag;
	} else {
		/* non-NCQ must not overlap with queued commands */
		WARN_ON(link->sactive);

		ap->nr_active_links++;
		link->active_tag = qc->tag;
	}

	qc->flags |= ATA_QCFLAG_ACTIVE;
	ap->qc_active |= 1 << qc->tag;

	/* set up DMA mapping only for DMA protocols, or for PIO when
	 * the controller does PIO via DMA (ATA_FLAG_PIO_DMA)
	 */
	if (ata_is_dma(prot) || (ata_is_pio(prot) &&
				 (ap->flags & ATA_FLAG_PIO_DMA))) {
		if (qc->flags & ATA_QCFLAG_SG) {
			if (ata_sg_setup(qc))
				goto sg_err;
		} else if (qc->flags & ATA_QCFLAG_SINGLE) {
			if (ata_sg_setup_one(qc))
				goto sg_err;
		}
	} else {
		qc->flags &= ~ATA_QCFLAG_DMAMAP;
	}

	/* if device is sleeping, schedule softreset and abort the link */
	if (unlikely(qc->dev->flags & ATA_DFLAG_SLEEPING)) {
		link->eh_info.action |= ATA_EH_SOFTRESET;
		ata_ehi_push_desc(&link->eh_info, "waking up from sleep");
		ata_link_abort(link);
		return;
	}

	ap->ops->qc_prep(qc);

	qc->err_mask |= ap->ops->qc_issue(qc);
	if (unlikely(qc->err_mask))
		goto err;
	return;

sg_err:
	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->err_mask |= AC_ERR_SYSTEM;
err:
	ata_qc_complete(qc);
}
5980
/**
 *	ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
 *	@qc: command to issue to device
 *
 *	Using various libata functions and hooks, this function
 *	starts an ATA command.  ATA commands are grouped into
 *	classes called "protocols", and issuing each type of protocol
 *	is slightly different.
 *
 *	May be used as the qc_issue() entry in ata_port_operations.
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	Zero on success, AC_ERR_* mask on failure
 */

unsigned int ata_qc_issue_prot(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;

	/* Use polling pio if the LLD doesn't handle
	 * interrupt driven pio and atapi CDB interrupt.
	 */
	if (ap->flags & ATA_FLAG_PIO_POLLING) {
		switch (qc->tf.protocol) {
		case ATA_PROT_PIO:
		case ATA_PROT_NODATA:
		case ATA_PROT_ATAPI:
		case ATA_PROT_ATAPI_NODATA:
			qc->tf.flags |= ATA_TFLAG_POLLING;
			break;
		case ATA_PROT_ATAPI_DMA:
			if (qc->dev->flags & ATA_DFLAG_CDB_INTR)
				/* see ata_dma_blacklisted() */
				BUG();
			break;
		default:
			break;
		}
	}

	/* select the device */
	ata_dev_select(ap, qc->dev->devno, 1, 0);

	/* start the command */
	switch (qc->tf.protocol) {
	case ATA_PROT_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);
		ap->hsm_task_state = HSM_ST_LAST;

		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

		break;

	case ATA_PROT_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	 /* set up bmdma */
		ap->ops->bmdma_start(qc);	 /* initiate bmdma */
		ap->hsm_task_state = HSM_ST_LAST;
		break;

	case ATA_PROT_PIO:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		if (qc->tf.flags & ATA_TFLAG_WRITE) {
			/* PIO data out protocol */
			ap->hsm_task_state = HSM_ST_FIRST;
			ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* always send first data block using
			 * the ata_pio_task() codepath.
			 */
		} else {
			/* PIO data in protocol */
			ap->hsm_task_state = HSM_ST;

			if (qc->tf.flags & ATA_TFLAG_POLLING)
				ata_port_queue_task(ap, ata_pio_task, qc, 0);

			/* if polling, ata_pio_task() handles the rest.
			 * otherwise, interrupt handler takes over from here.
			 */
		}

		break;

	case ATA_PROT_ATAPI:
	case ATA_PROT_ATAPI_NODATA:
		if (qc->tf.flags & ATA_TFLAG_POLLING)
			ata_qc_set_polling(qc);

		ata_tf_to_host(ap, &qc->tf);

		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if ((!(qc->dev->flags & ATA_DFLAG_CDB_INTR)) ||
		    (qc->tf.flags & ATA_TFLAG_POLLING))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	case ATA_PROT_ATAPI_DMA:
		WARN_ON(qc->tf.flags & ATA_TFLAG_POLLING);

		ap->ops->tf_load(ap, &qc->tf);	 /* load tf registers */
		ap->ops->bmdma_setup(qc);	 /* set up bmdma */
		ap->hsm_task_state = HSM_ST_FIRST;

		/* send cdb by polling if no cdb interrupt */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			ata_port_queue_task(ap, ata_pio_task, qc, 0);
		break;

	default:
		WARN_ON(1);
		return AC_ERR_SYSTEM;
	}

	return 0;
}
6112
1da177e4
LT
/**
 *	ata_host_intr - Handle host interrupt for given (port, task)
 *	@ap: Port on which interrupt arrived (possibly...)
 *	@qc: Taskfile currently active in engine
 *
 *	Handle host interrupt for given queued command.  Currently,
 *	only DMA interrupts are handled.  All other commands are
 *	handled via polling with interrupts disabled (nIEN bit).
 *
 *	LOCKING:
 *	spin_lock_irqsave(host lock)
 *
 *	RETURNS:
 *	One if interrupt was handled, zero if not (shared irq).
 */

inline unsigned int ata_host_intr(struct ata_port *ap,
				  struct ata_queued_cmd *qc)
{
	struct ata_eh_info *ehi = &ap->link.eh_info;
	u8 status, host_stat = 0;

	VPRINTK("ata%u: protocol %d task_state %d\n",
		ap->print_id, qc->tf.protocol, ap->hsm_task_state);

	/* Check whether we are expecting interrupt in this state */
	switch (ap->hsm_task_state) {
	case HSM_ST_FIRST:
		/* Some pre-ATAPI-4 devices assert INTRQ
		 * at this state when ready to receive CDB.
		 */

		/* Check the ATA_DFLAG_CDB_INTR flag is enough here.
		 * The flag was turned on only for atapi devices.  No
		 * need to check ata_is_atapi(qc->tf.protocol) again.
		 */
		if (!(qc->dev->flags & ATA_DFLAG_CDB_INTR))
			goto idle_irq;
		break;
	case HSM_ST_LAST:
		if (qc->tf.protocol == ATA_PROT_DMA ||
		    qc->tf.protocol == ATA_PROT_ATAPI_DMA) {
			/* check status of DMA engine */
			host_stat = ap->ops->bmdma_status(ap);
			VPRINTK("ata%u: host_stat 0x%X\n",
				ap->print_id, host_stat);

			/* if it's not our irq... */
			if (!(host_stat & ATA_DMA_INTR))
				goto idle_irq;

			/* before we do anything else, clear DMA-Start bit */
			ap->ops->bmdma_stop(qc);

			if (unlikely(host_stat & ATA_DMA_ERR)) {
				/* error when transfering data to/from memory */
				qc->err_mask |= AC_ERR_HOST_BUS;
				ap->hsm_task_state = HSM_ST_ERR;
			}
		}
		break;
	case HSM_ST:
		break;
	default:
		goto idle_irq;
	}

	/* check altstatus -- reading it does not clear INTRQ */
	status = ata_altstatus(ap);
	if (status & ATA_BUSY)
		goto idle_irq;

	/* check main status, clearing INTRQ */
	status = ata_chk_status(ap);
	if (unlikely(status & ATA_BUSY))
		goto idle_irq;

	/* ack bmdma irq events */
	ap->ops->irq_clear(ap);

	ata_hsm_move(ap, qc, status, 0);

	/* record BMDMA status for EH if the DMA command failed */
	if (unlikely(qc->err_mask) && (qc->tf.protocol == ATA_PROT_DMA ||
				       qc->tf.protocol == ATA_PROT_ATAPI_DMA))
		ata_ehi_push_desc(ehi, "BMDMA stat 0x%x", host_stat);

	return 1;	/* irq handled */

idle_irq:
	ap->stats.idle_irq++;

#ifdef ATA_IRQ_TRAP
	if ((ap->stats.idle_irq % 1000) == 0) {
		ata_chk_status(ap);
		ap->ops->irq_clear(ap);
		ata_port_printk(ap, KERN_WARNING, "irq trap\n");
		return 1;
	}
#endif
	return 0;	/* irq not handled */
}
6214
/**
 *	ata_interrupt - Default ATA host interrupt handler
 *	@irq: irq line (unused)
 *	@dev_instance: pointer to our ata_host information structure
 *
 *	Default interrupt handler for PCI IDE devices.  Calls
 *	ata_host_intr() for each port that is not disabled.
 *
 *	LOCKING:
 *	Obtains host lock during operation.
 *
 *	RETURNS:
 *	IRQ_NONE or IRQ_HANDLED.
 */

irqreturn_t ata_interrupt(int irq, void *dev_instance)
{
	struct ata_host *host = dev_instance;
	unsigned int i;
	unsigned int handled = 0;
	unsigned long flags;

	/* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
	spin_lock_irqsave(&host->lock, flags);

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap;

		ap = host->ports[i];
		if (ap &&
		    !(ap->flags & ATA_FLAG_DISABLED)) {
			struct ata_queued_cmd *qc;

			/* only dispatch if an interrupt-driven command
			 * is actually in flight on this port
			 */
			qc = ata_qc_from_tag(ap, ap->link.active_tag);
			if (qc && (!(qc->tf.flags & ATA_TFLAG_POLLING)) &&
			    (qc->flags & ATA_QCFLAG_ACTIVE))
				handled |= ata_host_intr(ap, qc);
		}
	}

	spin_unlock_irqrestore(&host->lock, flags);

	return IRQ_RETVAL(handled);
}
6259
34bf2170
TH
6260/**
6261 * sata_scr_valid - test whether SCRs are accessible
936fd732 6262 * @link: ATA link to test SCR accessibility for
34bf2170 6263 *
936fd732 6264 * Test whether SCRs are accessible for @link.
34bf2170
TH
6265 *
6266 * LOCKING:
6267 * None.
6268 *
6269 * RETURNS:
6270 * 1 if SCRs are accessible, 0 otherwise.
6271 */
936fd732 6272int sata_scr_valid(struct ata_link *link)
34bf2170 6273{
936fd732
TH
6274 struct ata_port *ap = link->ap;
6275
a16abc0b 6276 return (ap->flags & ATA_FLAG_SATA) && ap->ops->scr_read;
34bf2170
TH
6277}
6278
6279/**
6280 * sata_scr_read - read SCR register of the specified port
936fd732 6281 * @link: ATA link to read SCR for
34bf2170
TH
6282 * @reg: SCR to read
6283 * @val: Place to store read value
6284 *
936fd732 6285 * Read SCR register @reg of @link into *@val. This function is
633273a3
TH
6286 * guaranteed to succeed if @link is ap->link, the cable type of
6287 * the port is SATA and the port implements ->scr_read.
34bf2170
TH
6288 *
6289 * LOCKING:
633273a3 6290 * None if @link is ap->link. Kernel thread context otherwise.
34bf2170
TH
6291 *
6292 * RETURNS:
6293 * 0 on success, negative errno on failure.
6294 */
936fd732 6295int sata_scr_read(struct ata_link *link, int reg, u32 *val)
34bf2170 6296{
633273a3
TH
6297 if (ata_is_host_link(link)) {
6298 struct ata_port *ap = link->ap;
936fd732 6299
633273a3
TH
6300 if (sata_scr_valid(link))
6301 return ap->ops->scr_read(ap, reg, val);
6302 return -EOPNOTSUPP;
6303 }
6304
6305 return sata_pmp_scr_read(link, reg, val);
34bf2170
TH
6306}
6307
/**
 *	sata_scr_write - write SCR register of the specified port
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	Write @val to SCR register @reg of @link.  This function is
 *	guaranteed to succeed if @link is ap->link, the cable type of
 *	the port is SATA and the port implements ->scr_write
 *	(accessibility is determined via sata_scr_valid()).
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;

		if (sata_scr_valid(link))
			return ap->ops->scr_write(ap, reg, val);
		return -EOPNOTSUPP;
	}

	/* PMP links are routed through the PMP helper */
	return sata_pmp_scr_write(link, reg, val);
}
6336
/**
 *	sata_scr_write_flush - write SCR register of the specified port and flush
 *	@link: ATA link to write SCR for
 *	@reg: SCR to write
 *	@val: value to write
 *
 *	This function is identical to sata_scr_write() except that this
 *	function performs flush after writing to the register.  The
 *	flush is implemented by reading the register back.
 *
 *	LOCKING:
 *	None if @link is ap->link.  Kernel thread context otherwise.
 *
 *	RETURNS:
 *	0 on success, negative errno on failure.
 */
int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
{
	if (ata_is_host_link(link)) {
		struct ata_port *ap = link->ap;
		int rc;

		if (sata_scr_valid(link)) {
			rc = ap->ops->scr_write(ap, reg, val);
			/* read back to force the write to the device */
			if (rc == 0)
				rc = ap->ops->scr_read(ap, reg, &val);
			return rc;
		}
		return -EOPNOTSUPP;
	}

	/* NOTE(review): the PMP path does not perform the read-back
	 * flush that the host-link path does -- confirm intentional */
	return sata_pmp_scr_write(link, reg, val);
}
6369
6370/**
936fd732
TH
6371 * ata_link_online - test whether the given link is online
6372 * @link: ATA link to test
34bf2170 6373 *
936fd732
TH
6374 * Test whether @link is online. Note that this function returns
6375 * 0 if online status of @link cannot be obtained, so
6376 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6377 *
6378 * LOCKING:
6379 * None.
6380 *
6381 * RETURNS:
6382 * 1 if the port online status is available and online.
6383 */
936fd732 6384int ata_link_online(struct ata_link *link)
34bf2170
TH
6385{
6386 u32 sstatus;
6387
936fd732
TH
6388 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6389 (sstatus & 0xf) == 0x3)
34bf2170
TH
6390 return 1;
6391 return 0;
6392}
6393
6394/**
936fd732
TH
6395 * ata_link_offline - test whether the given link is offline
6396 * @link: ATA link to test
34bf2170 6397 *
936fd732
TH
6398 * Test whether @link is offline. Note that this function
6399 * returns 0 if offline status of @link cannot be obtained, so
6400 * ata_link_online(link) != !ata_link_offline(link).
34bf2170
TH
6401 *
6402 * LOCKING:
6403 * None.
6404 *
6405 * RETURNS:
6406 * 1 if the port offline status is available and offline.
6407 */
936fd732 6408int ata_link_offline(struct ata_link *link)
34bf2170
TH
6409{
6410 u32 sstatus;
6411
936fd732
TH
6412 if (sata_scr_read(link, SCR_STATUS, &sstatus) == 0 &&
6413 (sstatus & 0xf) != 0x3)
34bf2170
TH
6414 return 1;
6415 return 0;
6416}
0baab86b 6417
/**
 *	ata_flush_cache - flush device write cache
 *	@dev: device whose cache to flush
 *
 *	Issue FLUSH CACHE (EXT if the device supports 48-bit) via
 *	ata_do_simple_cmd().  A no-op for devices on which
 *	ata_try_flush_cache() says flushing is unnecessary.
 *
 *	RETURNS:
 *	0 on success, -EIO if the flush command failed.
 */
int ata_flush_cache(struct ata_device *dev)
{
	unsigned int err_mask;
	u8 cmd;

	if (!ata_try_flush_cache(dev))
		return 0;

	if (dev->flags & ATA_DFLAG_FLUSH_EXT)
		cmd = ATA_CMD_FLUSH_EXT;
	else
		cmd = ATA_CMD_FLUSH;

	/* This is wrong. On a failed flush we get back the LBA of the lost
	   sector and we should (assuming it wasn't aborted as unknown) issue
	   a further flush command to continue the writeback until it
	   does not error */
	err_mask = ata_do_simple_cmd(dev, cmd);
	if (err_mask) {
		ata_dev_printk(dev, KERN_ERR, "failed to flush cache\n");
		return -EIO;
	}

	return 0;
}
6443
6ffa01d8 6444#ifdef CONFIG_PM
cca3974e
JG
/**
 *	ata_host_request_pm - request EH to perform PM ops on all ports
 *	@host: host to operate on
 *	@mesg: PM message to record in each port's pm_mesg
 *	@action: EH action(s) to request on every link
 *	@ehi_flags: EH info flags to set on every link
 *	@wait: if non-zero, wait for each port's EH to finish and
 *	       propagate the first non-zero result
 *
 *	For each port: wait out any pending PM operation, then mark
 *	PM_PENDING, record the request in the link EH info and
 *	schedule EH under the port lock.
 *
 *	RETURNS:
 *	0 on success, first port's -errno otherwise (only when @wait).
 */
static int ata_host_request_pm(struct ata_host *host, pm_message_t mesg,
			       unsigned int action, unsigned int ehi_flags,
			       int wait)
{
	unsigned long flags;
	int i, rc;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];
		struct ata_link *link;

		/* Previous resume operation might still be in
		 * progress.  Wait for PM_PENDING to clear.
		 */
		if (ap->pflags & ATA_PFLAG_PM_PENDING) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
		}

		/* request PM ops to EH */
		spin_lock_irqsave(ap->lock, flags);

		ap->pm_mesg = mesg;
		if (wait) {
			/* EH reports its result through ap->pm_result */
			rc = 0;
			ap->pm_result = &rc;
		}

		ap->pflags |= ATA_PFLAG_PM_PENDING;
		__ata_port_for_each_link(link, ap) {
			link->eh_info.action |= action;
			link->eh_info.flags |= ehi_flags;
		}

		ata_port_schedule_eh(ap);

		spin_unlock_irqrestore(ap->lock, flags);

		/* wait and check result */
		if (wait) {
			ata_port_wait_eh(ap);
			WARN_ON(ap->pflags & ATA_PFLAG_PM_PENDING);
			if (rc)
				return rc;
		}
	}

	return 0;
}
6494
/**
 *	ata_host_suspend - suspend host
 *	@host: host to suspend
 *	@mesg: PM message
 *
 *	Suspend @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and waits for EH
 *	to finish.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_host_suspend(struct ata_host *host, pm_message_t mesg)
{
	int rc;

	/*
	 * disable link pm on all ports before requesting
	 * any pm activity
	 */
	ata_lpm_enable(host);

	/* wait == 1: block until every port's EH finishes suspend */
	rc = ata_host_request_pm(host, mesg, 0, ATA_EHI_QUIET, 1);
	if (rc == 0)
		host->dev->power.power_state = mesg;
	return rc;
}
6525
/**
 *	ata_host_resume - resume host
 *	@host: host to resume
 *
 *	Resume @host.  Actual operation is performed by EH.  This
 *	function requests EH to perform PM operations and returns.
 *	Note that all resume operations are performed parallely.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
void ata_host_resume(struct ata_host *host)
{
	/* wait == 0: resume all ports in parallel, don't block */
	ata_host_request_pm(host, PMSG_ON, ATA_EH_SOFTRESET,
			    ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET, 0);
	host->dev->power.power_state = PMSG_ON;

	/* reenable link pm */
	ata_lpm_disable(host);
}
6ffa01d8 6546#endif
500530f6 6547
c893a3ae
RD
/**
 *	ata_port_start - Set port up for dma.
 *	@ap: Port to initialize
 *
 *	Called just after data structures for each port are
 *	initialized.  Allocates space for PRD table.
 *
 *	May be used as the port_start() entry in ata_port_operations.
 *
 *	LOCKING:
 *	Inherited from caller.
 *
 *	RETURNS:
 *	0 on success, -errno on failure.
 */
int ata_port_start(struct ata_port *ap)
{
	struct device *dev = ap->dev;
	int rc;

	/* managed allocation -- freed automatically on driver detach */
	ap->prd = dmam_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma,
				      GFP_KERNEL);
	if (!ap->prd)
		return -ENOMEM;

	rc = ata_pad_alloc(ap, dev);
	if (rc)
		return rc;

	DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd,
		(unsigned long long)ap->prd_dma);
	return 0;
}
6578
3ef3b43d
TH
/**
 *	ata_dev_init - Initialize an ata_device structure
 *	@dev: Device structure to initialize
 *
 *	Initialize @dev in preparation for probing.
 *
 *	LOCKING:
 *	Inherited from caller.
 */
void ata_dev_init(struct ata_device *dev)
{
	struct ata_link *link = dev->link;
	struct ata_port *ap = link->ap;
	unsigned long flags;

	/* SATA spd limit is bound to the first device */
	link->sata_spd_limit = link->hw_sata_spd_limit;
	link->sata_spd = 0;

	/* High bits of dev->flags are used to record warm plug
	 * requests which occur asynchronously.  Synchronize using
	 * host lock.
	 */
	spin_lock_irqsave(ap->lock, flags);
	dev->flags &= ~ATA_DFLAG_INIT_MASK;
	dev->horkage = 0;
	spin_unlock_irqrestore(ap->lock, flags);

	/* wipe everything past the persistent prefix of the struct */
	memset((void *)dev + ATA_DEVICE_CLEAR_OFFSET, 0,
	       sizeof(*dev) - ATA_DEVICE_CLEAR_OFFSET);
	/* all transfer modes allowed until probing narrows them down */
	dev->pio_mask = UINT_MAX;
	dev->mwdma_mask = UINT_MAX;
	dev->udma_mask = UINT_MAX;
}
6613
4fb37a25
TH
/**
 *	ata_link_init - Initialize an ata_link structure
 *	@ap: ATA port link is attached to
 *	@link: Link structure to initialize
 *	@pmp: Port multiplier port number
 *
 *	Initialize @link.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep)
 */
void ata_link_init(struct ata_port *ap, struct ata_link *link, int pmp)
{
	int i;

	/* clear everything except for devices */
	memset(link, 0, offsetof(struct ata_link, device[0]));

	link->ap = ap;
	link->pmp = pmp;
	link->active_tag = ATA_TAG_POISON;
	link->hw_sata_spd_limit = UINT_MAX;

	/* can't use iterator, ap isn't initialized yet */
	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &link->device[i];

		dev->link = link;
		dev->devno = dev - link->device;
		ata_dev_init(dev);
	}
}
6646
6647/**
6648 * sata_link_init_spd - Initialize link->sata_spd_limit
6649 * @link: Link to configure sata_spd_limit for
6650 *
6651 * Initialize @link->[hw_]sata_spd_limit to the currently
6652 * configured value.
6653 *
6654 * LOCKING:
6655 * Kernel thread context (may sleep).
6656 *
6657 * RETURNS:
6658 * 0 on success, -errno on failure.
6659 */
fb7fd614 6660int sata_link_init_spd(struct ata_link *link)
4fb37a25
TH
6661{
6662 u32 scontrol, spd;
6663 int rc;
6664
6665 rc = sata_scr_read(link, SCR_CONTROL, &scontrol);
6666 if (rc)
6667 return rc;
6668
6669 spd = (scontrol >> 4) & 0xf;
6670 if (spd)
6671 link->hw_sata_spd_limit &= (1 << spd) - 1;
6672
6673 link->sata_spd_limit = link->hw_sata_spd_limit;
6674
6675 return 0;
6676}
6677
/**
 *	ata_port_alloc - allocate and initialize basic ATA port resources
 *	@host: ATA host this allocated port belongs to
 *
 *	Allocate and initialize basic ATA port resources.
 *
 *	RETURNS:
 *	Allocate ATA port on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_port *ata_port_alloc(struct ata_host *host)
{
	struct ata_port *ap;

	DPRINTK("ENTER\n");

	ap = kzalloc(sizeof(*ap), GFP_KERNEL);
	if (!ap)
		return NULL;

	ap->pflags |= ATA_PFLAG_INITIALIZING;
	ap->lock = &host->lock;
	ap->flags = ATA_FLAG_DISABLED;
	ap->print_id = -1;	/* real id assigned at registration */
	ap->ctl = ATA_DEVCTL_OBS;
	ap->host = host;
	ap->dev = host->dev;
	ap->last_ctl = 0xFF;

#if defined(ATA_VERBOSE_DEBUG)
	/* turn on all debugging levels */
	ap->msg_enable = 0x00FF;
#elif defined(ATA_DEBUG)
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_INFO | ATA_MSG_CTL | ATA_MSG_WARN | ATA_MSG_ERR;
#else
	ap->msg_enable = ATA_MSG_DRV | ATA_MSG_ERR | ATA_MSG_WARN;
#endif

	INIT_DELAYED_WORK(&ap->port_task, NULL);
	INIT_DELAYED_WORK(&ap->hotplug_task, ata_scsi_hotplug);
	INIT_WORK(&ap->scsi_rescan_task, ata_scsi_dev_rescan);
	INIT_LIST_HEAD(&ap->eh_done_q);
	init_waitqueue_head(&ap->eh_wait_q);
	init_timer_deferrable(&ap->fastdrain_timer);
	ap->fastdrain_timer.function = ata_eh_fastdrain_timerfn;
	ap->fastdrain_timer.data = (unsigned long)ap;

	ap->cbl = ATA_CBL_NONE;

	ata_link_init(ap, &ap->link, 0);

#ifdef ATA_IRQ_TRAP
	ap->stats.unhandled_irq = 1;
	ap->stats.idle_irq = 1;
#endif
	return ap;
}
6737
f0d36efd
TH
/* devres release callback for ata_host_alloc(): frees every port
 * (dropping the SCSI host reference and PMP link array first) and
 * clears the drvdata pointer.  Runs when the owning device's
 * resources are released.
 */
static void ata_host_release(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!ap)
			continue;

		if (ap->scsi_host)
			scsi_host_put(ap->scsi_host);

		kfree(ap->pmp_link);
		kfree(ap);
		host->ports[i] = NULL;
	}

	dev_set_drvdata(gendev, NULL);
}
6759
f3187195
TH
/**
 *	ata_host_alloc - allocate and init basic ATA host resources
 *	@dev: generic device this host is associated with
 *	@max_ports: maximum number of ATA ports associated with this host
 *
 *	Allocate and initialize basic ATA host resources.  LLD calls
 *	this function to allocate a host, initializes it fully and
 *	attaches it using ata_host_register().
 *
 *	@max_ports ports are allocated and host->n_ports is
 *	initialized to @max_ports.  The caller is allowed to decrease
 *	host->n_ports before calling ata_host_register().  The unused
 *	ports will be automatically freed on registration.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc(struct device *dev, int max_ports)
{
	struct ata_host *host;
	size_t sz;
	int i;

	DPRINTK("ENTER\n");

	if (!devres_open_group(dev, NULL, GFP_KERNEL))
		return NULL;

	/* alloc a container for our list of ATA ports (buses);
	 * +1 leaves a NULL sentinel slot after the last port
	 */
	sz = sizeof(struct ata_host) + (max_ports + 1) * sizeof(void *);
	host = devres_alloc(ata_host_release, sz, GFP_KERNEL);
	if (!host)
		goto err_out;

	devres_add(dev, host);
	dev_set_drvdata(dev, host);

	spin_lock_init(&host->lock);
	host->dev = dev;
	host->n_ports = max_ports;

	/* allocate ports bound to this host */
	for (i = 0; i < max_ports; i++) {
		struct ata_port *ap;

		ap = ata_port_alloc(host);
		if (!ap)
			goto err_out;

		ap->port_no = i;
		host->ports[i] = ap;
	}

	devres_remove_group(dev, NULL);
	return host;

 err_out:
	devres_release_group(dev, NULL);
	return NULL;
}
6824
f5cda257
TH
/**
 *	ata_host_alloc_pinfo - alloc host and init with port_info array
 *	@dev: generic device this host is associated with
 *	@ppi: array of ATA port_info to initialize host with
 *	@n_ports: number of ATA ports attached to this host
 *
 *	Allocate ATA host and initialize with info from @ppi.  If NULL
 *	terminated, @ppi may contain fewer entries than @n_ports.  The
 *	last entry will be used for the remaining ports.
 *
 *	RETURNS:
 *	Allocate ATA host on success, NULL on failure.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 */
struct ata_host *ata_host_alloc_pinfo(struct device *dev,
				      const struct ata_port_info * const * ppi,
				      int n_ports)
{
	const struct ata_port_info *pi;
	struct ata_host *host;
	int i, j;

	host = ata_host_alloc(dev, n_ports);
	if (!host)
		return NULL;

	/* NOTE(review): assumes ppi[0] is non-NULL -- pi would be
	 * dereferenced while still NULL otherwise; confirm callers
	 */
	for (i = 0, j = 0, pi = NULL; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		/* once @ppi runs out, keep reusing the last entry */
		if (ppi[j])
			pi = ppi[j++];

		ap->pio_mask = pi->pio_mask;
		ap->mwdma_mask = pi->mwdma_mask;
		ap->udma_mask = pi->udma_mask;
		ap->flags |= pi->flags;
		ap->link.flags |= pi->link_flags;
		ap->ops = pi->port_ops;

		/* first real (non-dummy) port ops become the host ops */
		if (!host->ops && (pi->port_ops != &ata_dummy_port_ops))
			host->ops = pi->port_ops;
		if (!host->private_data && pi->private_data)
			host->private_data = pi->private_data;
	}

	return host;
}
6874
32ebbc0c
TH
/* devres release callback registered by ata_host_start(): invokes
 * each port's port_stop() and then the host's host_stop(), mirroring
 * the start sequence in reverse when the device's resources go away.
 */
static void ata_host_stop(struct device *gendev, void *res)
{
	struct ata_host *host = dev_get_drvdata(gendev);
	int i;

	WARN_ON(!(host->flags & ATA_HOST_STARTED));

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}

	if (host->ops->host_stop)
		host->ops->host_stop(host);
}
6892
ecef7253
TH
/**
 *	ata_host_start - start and freeze ports of an ATA host
 *	@host: ATA host to start ports for
 *
 *	Start and then freeze ports of @host.  Started status is
 *	recorded in host->flags, so this function can be called
 *	multiple times.  Ports are guaranteed to get started only
 *	once.  If host->ops isn't initialized yet, its set to the
 *	first non-dummy port ops.
 *
 *	LOCKING:
 *	Inherited from calling layer (may sleep).
 *
 *	RETURNS:
 *	0 if all ports are started successfully, -errno otherwise.
 */
int ata_host_start(struct ata_host *host)
{
	int have_stop = 0;
	void *start_dr = NULL;
	int i, rc;

	/* idempotent -- second and later calls are no-ops */
	if (host->flags & ATA_HOST_STARTED)
		return 0;

	/* first pass: pick host->ops and find out whether any stop
	 * callback exists (which would require a devres entry)
	 */
	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (!host->ops && !ata_port_is_dummy(ap))
			host->ops = ap->ops;

		if (ap->ops->port_stop)
			have_stop = 1;
	}

	if (host->ops->host_stop)
		have_stop = 1;

	/* allocate the stop devres up front so starting can't fail
	 * after ports are already running
	 */
	if (have_stop) {
		start_dr = devres_alloc(ata_host_stop, 0, GFP_KERNEL);
		if (!start_dr)
			return -ENOMEM;
	}

	for (i = 0; i < host->n_ports; i++) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_start) {
			rc = ap->ops->port_start(ap);
			if (rc) {
				if (rc != -ENODEV)
					dev_printk(KERN_ERR, host->dev,
						   "failed to start port %d "
						   "(errno=%d)\n", i, rc);
				goto err_out;
			}
		}
		ata_eh_freeze_port(ap);
	}

	if (start_dr)
		devres_add(host->dev, start_dr);
	host->flags |= ATA_HOST_STARTED;
	return 0;

 err_out:
	/* unwind: stop only the ports that were started */
	while (--i >= 0) {
		struct ata_port *ap = host->ports[i];

		if (ap->ops->port_stop)
			ap->ops->port_stop(ap);
	}
	devres_free(start_dr);
	return rc;
}
6968
b03732f0 6969/**
cca3974e
JG
6970 * ata_sas_host_init - Initialize a host struct
6971 * @host: host to initialize
6972 * @dev: device host is attached to
6973 * @flags: host flags
6974 * @ops: port_ops
b03732f0
BK
6975 *
6976 * LOCKING:
6977 * PCI/etc. bus probe sem.
6978 *
6979 */
f3187195 6980/* KILLME - the only user left is ipr */
cca3974e
JG
6981void ata_host_init(struct ata_host *host, struct device *dev,
6982 unsigned long flags, const struct ata_port_operations *ops)
b03732f0 6983{
cca3974e
JG
6984 spin_lock_init(&host->lock);
6985 host->dev = dev;
6986 host->flags = flags;
6987 host->ops = ops;
b03732f0
BK
6988}
6989
f3187195
TH
6990/**
6991 * ata_host_register - register initialized ATA host
6992 * @host: ATA host to register
6993 * @sht: template for SCSI host
6994 *
6995 * Register initialized ATA host. @host is allocated using
6996 * ata_host_alloc() and fully initialized by LLD. This function
6997 * starts ports, registers @host with ATA and SCSI layers and
6998 * probe registered devices.
6999 *
7000 * LOCKING:
7001 * Inherited from calling layer (may sleep).
7002 *
7003 * RETURNS:
7004 * 0 on success, -errno otherwise.
7005 */
7006int ata_host_register(struct ata_host *host, struct scsi_host_template *sht)
7007{
7008 int i, rc;
7009
7010 /* host must have been started */
7011 if (!(host->flags & ATA_HOST_STARTED)) {
7012 dev_printk(KERN_ERR, host->dev,
7013 "BUG: trying to register unstarted host\n");
7014 WARN_ON(1);
7015 return -EINVAL;
7016 }
7017
7018 /* Blow away unused ports. This happens when LLD can't
7019 * determine the exact number of ports to allocate at
7020 * allocation time.
7021 */
7022 for (i = host->n_ports; host->ports[i]; i++)
7023 kfree(host->ports[i]);
7024
7025 /* give ports names and add SCSI hosts */
7026 for (i = 0; i < host->n_ports; i++)
7027 host->ports[i]->print_id = ata_print_id++;
7028
7029 rc = ata_scsi_add_hosts(host, sht);
7030 if (rc)
7031 return rc;
7032
fafbae87
TH
7033 /* associate with ACPI nodes */
7034 ata_acpi_associate(host);
7035
f3187195
TH
7036 /* set cable, sata_spd_limit and report */
7037 for (i = 0; i < host->n_ports; i++) {
7038 struct ata_port *ap = host->ports[i];
f3187195
TH
7039 unsigned long xfer_mask;
7040
7041 /* set SATA cable type if still unset */
7042 if (ap->cbl == ATA_CBL_NONE && (ap->flags & ATA_FLAG_SATA))
7043 ap->cbl = ATA_CBL_SATA;
7044
7045 /* init sata_spd_limit to the current value */
4fb37a25 7046 sata_link_init_spd(&ap->link);
f3187195 7047
cbcdd875 7048 /* print per-port info to dmesg */
f3187195
TH
7049 xfer_mask = ata_pack_xfermask(ap->pio_mask, ap->mwdma_mask,
7050 ap->udma_mask);
7051
abf6e8ed 7052 if (!ata_port_is_dummy(ap)) {
cbcdd875
TH
7053 ata_port_printk(ap, KERN_INFO,
7054 "%cATA max %s %s\n",
a16abc0b 7055 (ap->flags & ATA_FLAG_SATA) ? 'S' : 'P',
f3187195 7056 ata_mode_string(xfer_mask),
cbcdd875 7057 ap->link.eh_info.desc);
abf6e8ed
TH
7058 ata_ehi_clear_desc(&ap->link.eh_info);
7059 } else
f3187195
TH
7060 ata_port_printk(ap, KERN_INFO, "DUMMY\n");
7061 }
7062
7063 /* perform each probe synchronously */
7064 DPRINTK("probe begin\n");
7065 for (i = 0; i < host->n_ports; i++) {
7066 struct ata_port *ap = host->ports[i];
7067 int rc;
7068
7069 /* probe */
7070 if (ap->ops->error_handler) {
9af5c9c9 7071 struct ata_eh_info *ehi = &ap->link.eh_info;
f3187195
TH
7072 unsigned long flags;
7073
7074 ata_port_probe(ap);
7075
7076 /* kick EH for boot probing */
7077 spin_lock_irqsave(ap->lock, flags);
7078
f58229f8
TH
7079 ehi->probe_mask =
7080 (1 << ata_link_max_devices(&ap->link)) - 1;
f3187195
TH
7081 ehi->action |= ATA_EH_SOFTRESET;
7082 ehi->flags |= ATA_EHI_NO_AUTOPSY | ATA_EHI_QUIET;
7083
f4d6d004 7084 ap->pflags &= ~ATA_PFLAG_INITIALIZING;
f3187195
TH
7085 ap->pflags |= ATA_PFLAG_LOADING;
7086 ata_port_schedule_eh(ap);
7087
7088 spin_unlock_irqrestore(ap->lock, flags);
7089
7090 /* wait for EH to finish */
7091 ata_port_wait_eh(ap);
7092 } else {
7093 DPRINTK("ata%u: bus probe begin\n", ap->print_id);
7094 rc = ata_bus_probe(ap);
7095 DPRINTK("ata%u: bus probe end\n", ap->print_id);
7096
7097 if (rc) {
7098 /* FIXME: do something useful here?
7099 * Current libata behavior will
7100 * tear down everything when
7101 * the module is removed
7102 * or the h/w is unplugged.
7103 */
7104 }
7105 }
7106 }
7107
7108 /* probes are done, now scan each port's disk(s) */
7109 DPRINTK("host probe begin\n");
7110 for (i = 0; i < host->n_ports; i++) {
7111 struct ata_port *ap = host->ports[i];
7112
1ae46317 7113 ata_scsi_scan_host(ap, 1);
ca77329f 7114 ata_lpm_schedule(ap, ap->pm_policy);
f3187195
TH
7115 }
7116
7117 return 0;
7118}
7119
f5cda257
TH
7120/**
7121 * ata_host_activate - start host, request IRQ and register it
7122 * @host: target ATA host
7123 * @irq: IRQ to request
7124 * @irq_handler: irq_handler used when requesting IRQ
7125 * @irq_flags: irq_flags used when requesting IRQ
7126 * @sht: scsi_host_template to use when registering the host
7127 *
7128 * After allocating an ATA host and initializing it, most libata
7129 * LLDs perform three steps to activate the host - start host,
7130 * request IRQ and register it. This helper takes necessasry
7131 * arguments and performs the three steps in one go.
7132 *
3d46b2e2
PM
7133 * An invalid IRQ skips the IRQ registration and expects the host to
7134 * have set polling mode on the port. In this case, @irq_handler
7135 * should be NULL.
7136 *
f5cda257
TH
7137 * LOCKING:
7138 * Inherited from calling layer (may sleep).
7139 *
7140 * RETURNS:
7141 * 0 on success, -errno otherwise.
7142 */
7143int ata_host_activate(struct ata_host *host, int irq,
7144 irq_handler_t irq_handler, unsigned long irq_flags,
7145 struct scsi_host_template *sht)
7146{
cbcdd875 7147 int i, rc;
f5cda257
TH
7148
7149 rc = ata_host_start(host);
7150 if (rc)
7151 return rc;
7152
3d46b2e2
PM
7153 /* Special case for polling mode */
7154 if (!irq) {
7155 WARN_ON(irq_handler);
7156 return ata_host_register(host, sht);
7157 }
7158
f5cda257
TH
7159 rc = devm_request_irq(host->dev, irq, irq_handler, irq_flags,
7160 dev_driver_string(host->dev), host);
7161 if (rc)
7162 return rc;
7163
cbcdd875
TH
7164 for (i = 0; i < host->n_ports; i++)
7165 ata_port_desc(host->ports[i], "irq %d", irq);
4031826b 7166
f5cda257
TH
7167 rc = ata_host_register(host, sht);
7168 /* if failed, just free the IRQ and leave ports alone */
7169 if (rc)
7170 devm_free_irq(host->dev, irq, host);
7171
7172 return rc;
7173}
7174
720ba126
TH
/**
 *	ata_port_detach - Detach ATA port in preparation of device removal
 *	@ap: ATA port to be detached
 *
 *	Detach all ATA devices and the associated SCSI devices of @ap;
 *	then, remove the associated SCSI host.  @ap is guaranteed to
 *	be quiescent on return from this function.
 *
 *	LOCKING:
 *	Kernel thread context (may sleep).
 */
static void ata_port_detach(struct ata_port *ap)
{
	unsigned long flags;
	struct ata_link *link;
	struct ata_device *dev;

	/* ports without new-style EH go straight to SCSI host removal */
	if (!ap->ops->error_handler)
		goto skip_eh;

	/* tell EH we're leaving & flush EH */
	spin_lock_irqsave(ap->lock, flags);
	ap->pflags |= ATA_PFLAG_UNLOADING;
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);

	/* EH is now guaranteed to see UNLOADING - EH context belongs
	 * to us.  Disable all existing devices.
	 */
	ata_port_for_each_link(link, ap) {
		ata_link_for_each_dev(dev, link)
			ata_dev_disable(dev);
	}

	/* Final freeze & EH.  All in-flight commands are aborted.  EH
	 * will be skipped and retrials will be terminated with bad
	 * target.
	 */
	spin_lock_irqsave(ap->lock, flags);
	ata_port_freeze(ap);	/* won't be thawed */
	spin_unlock_irqrestore(ap->lock, flags);

	ata_port_wait_eh(ap);
	/* EH may have queued hotplug work; make sure it is gone too */
	cancel_rearming_delayed_work(&ap->hotplug_task);

 skip_eh:
	/* remove the associated SCSI host */
	scsi_remove_host(ap->scsi_host);
}
7225
0529c159
TH
7226/**
7227 * ata_host_detach - Detach all ports of an ATA host
7228 * @host: Host to detach
7229 *
7230 * Detach all ports of @host.
7231 *
7232 * LOCKING:
7233 * Kernel thread context (may sleep).
7234 */
7235void ata_host_detach(struct ata_host *host)
7236{
7237 int i;
7238
7239 for (i = 0; i < host->n_ports; i++)
7240 ata_port_detach(host->ports[i]);
562f0c2d
TH
7241
7242 /* the host is dead now, dissociate ACPI */
7243 ata_acpi_dissociate(host);
0529c159
TH
7244}
7245
1da177e4
LT
/**
 *	ata_std_ports - initialize ioaddr with standard port offsets.
 *	@ioaddr: IO address structure to be initialized
 *
 *	Utility function which initializes data_addr, error_addr,
 *	feature_addr, nsect_addr, lbal_addr, lbam_addr, lbah_addr,
 *	device_addr, status_addr, and command_addr to standard offsets
 *	relative to cmd_addr.
 *
 *	Does not set ctl_addr, altstatus_addr, bmdma_addr, or scr_addr.
 */

void ata_std_ports(struct ata_ioports *ioaddr)
{
	/* data and error/feature registers */
	ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
	ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
	ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
	/* sector count and LBA registers */
	ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
	ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
	ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
	ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
	/* device select, status and command registers */
	ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
	ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
	ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
}
7271
0baab86b 7272
374b1873
JG
7273#ifdef CONFIG_PCI
7274
1da177e4
LT
7275/**
7276 * ata_pci_remove_one - PCI layer callback for device removal
7277 * @pdev: PCI device that was removed
7278 *
b878ca5d
TH
7279 * PCI layer indicates to libata via this hook that hot-unplug or
7280 * module unload event has occurred. Detach all ports. Resource
7281 * release is handled via devres.
1da177e4
LT
7282 *
7283 * LOCKING:
7284 * Inherited from PCI layer (may sleep).
7285 */
f0d36efd 7286void ata_pci_remove_one(struct pci_dev *pdev)
1da177e4 7287{
2855568b 7288 struct device *dev = &pdev->dev;
cca3974e 7289 struct ata_host *host = dev_get_drvdata(dev);
1da177e4 7290
b878ca5d 7291 ata_host_detach(host);
1da177e4
LT
7292}
7293
7294/* move to PCI subsystem */
057ace5e 7295int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits)
1da177e4
LT
7296{
7297 unsigned long tmp = 0;
7298
7299 switch (bits->width) {
7300 case 1: {
7301 u8 tmp8 = 0;
7302 pci_read_config_byte(pdev, bits->reg, &tmp8);
7303 tmp = tmp8;
7304 break;
7305 }
7306 case 2: {
7307 u16 tmp16 = 0;
7308 pci_read_config_word(pdev, bits->reg, &tmp16);
7309 tmp = tmp16;
7310 break;
7311 }
7312 case 4: {
7313 u32 tmp32 = 0;
7314 pci_read_config_dword(pdev, bits->reg, &tmp32);
7315 tmp = tmp32;
7316 break;
7317 }
7318
7319 default:
7320 return -EINVAL;
7321 }
7322
7323 tmp &= bits->mask;
7324
7325 return (tmp == bits->val) ? 1 : 0;
7326}
9b847548 7327
6ffa01d8 7328#ifdef CONFIG_PM
/* Power down the PCI side of an ATA controller.  See
 * ata_pci_device_suspend() for the wrapper that quiesces the ATA host
 * first.
 */
void ata_pci_device_do_suspend(struct pci_dev *pdev, pm_message_t mesg)
{
	/* save config space before the device is disabled */
	pci_save_state(pdev);
	pci_disable_device(pdev);

	/* only drop to D3hot for a real suspend event */
	if (mesg.event == PM_EVENT_SUSPEND)
		pci_set_power_state(pdev, PCI_D3hot);
}
7337
553c4aa6 7338int ata_pci_device_do_resume(struct pci_dev *pdev)
9b847548 7339{
553c4aa6
TH
7340 int rc;
7341
9b847548
JA
7342 pci_set_power_state(pdev, PCI_D0);
7343 pci_restore_state(pdev);
553c4aa6 7344
b878ca5d 7345 rc = pcim_enable_device(pdev);
553c4aa6
TH
7346 if (rc) {
7347 dev_printk(KERN_ERR, &pdev->dev,
7348 "failed to enable device after resume (%d)\n", rc);
7349 return rc;
7350 }
7351
9b847548 7352 pci_set_master(pdev);
553c4aa6 7353 return 0;
500530f6
TH
7354}
7355
3c5100c1 7356int ata_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
500530f6 7357{
cca3974e 7358 struct ata_host *host = dev_get_drvdata(&pdev->dev);
500530f6
TH
7359 int rc = 0;
7360
cca3974e 7361 rc = ata_host_suspend(host, mesg);
500530f6
TH
7362 if (rc)
7363 return rc;
7364
3c5100c1 7365 ata_pci_device_do_suspend(pdev, mesg);
500530f6
TH
7366
7367 return 0;
7368}
7369
7370int ata_pci_device_resume(struct pci_dev *pdev)
7371{
cca3974e 7372 struct ata_host *host = dev_get_drvdata(&pdev->dev);
553c4aa6 7373 int rc;
500530f6 7374
553c4aa6
TH
7375 rc = ata_pci_device_do_resume(pdev);
7376 if (rc == 0)
7377 ata_host_resume(host);
7378 return rc;
9b847548 7379}
6ffa01d8
TH
7380#endif /* CONFIG_PM */
7381
1da177e4
LT
7382#endif /* CONFIG_PCI */
7383
7384
1da177e4
LT
7385static int __init ata_init(void)
7386{
a8601e5f 7387 ata_probe_timeout *= HZ;
1da177e4
LT
7388 ata_wq = create_workqueue("ata");
7389 if (!ata_wq)
7390 return -ENOMEM;
7391
453b07ac
TH
7392 ata_aux_wq = create_singlethread_workqueue("ata_aux");
7393 if (!ata_aux_wq) {
7394 destroy_workqueue(ata_wq);
7395 return -ENOMEM;
7396 }
7397
1da177e4
LT
7398 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
7399 return 0;
7400}
7401
/* module unload: tear down the workqueues created in ata_init() */
static void __exit ata_exit(void)
{
	destroy_workqueue(ata_wq);
	destroy_workqueue(ata_aux_wq);
}
7407
a4625085 7408subsys_initcall(ata_init);
1da177e4
LT
7409module_exit(ata_exit);
7410
67846b30 7411static unsigned long ratelimit_time;
34af946a 7412static DEFINE_SPINLOCK(ata_ratelimit_lock);
67846b30
JG
7413
7414int ata_ratelimit(void)
7415{
7416 int rc;
7417 unsigned long flags;
7418
7419 spin_lock_irqsave(&ata_ratelimit_lock, flags);
7420
7421 if (time_after(jiffies, ratelimit_time)) {
7422 rc = 1;
7423 ratelimit_time = jiffies + (HZ/5);
7424 } else
7425 rc = 0;
7426
7427 spin_unlock_irqrestore(&ata_ratelimit_lock, flags);
7428
7429 return rc;
7430}
7431
c22daff4
TH
7432/**
7433 * ata_wait_register - wait until register value changes
7434 * @reg: IO-mapped register
7435 * @mask: Mask to apply to read register value
7436 * @val: Wait condition
7437 * @interval_msec: polling interval in milliseconds
7438 * @timeout_msec: timeout in milliseconds
7439 *
7440 * Waiting for some bits of register to change is a common
7441 * operation for ATA controllers. This function reads 32bit LE
7442 * IO-mapped register @reg and tests for the following condition.
7443 *
7444 * (*@reg & mask) != val
7445 *
7446 * If the condition is met, it returns; otherwise, the process is
7447 * repeated after @interval_msec until timeout.
7448 *
7449 * LOCKING:
7450 * Kernel thread context (may sleep)
7451 *
7452 * RETURNS:
7453 * The final register value.
7454 */
7455u32 ata_wait_register(void __iomem *reg, u32 mask, u32 val,
7456 unsigned long interval_msec,
7457 unsigned long timeout_msec)
7458{
7459 unsigned long timeout;
7460 u32 tmp;
7461
7462 tmp = ioread32(reg);
7463
7464 /* Calculate timeout _after_ the first read to make sure
7465 * preceding writes reach the controller before starting to
7466 * eat away the timeout.
7467 */
7468 timeout = jiffies + (timeout_msec * HZ) / 1000;
7469
7470 while ((tmp & mask) == val && time_before(jiffies, timeout)) {
7471 msleep(interval_msec);
7472 tmp = ioread32(reg);
7473 }
7474
7475 return tmp;
7476}
7477
dd5b06c4
TH
/*
 * Dummy port_ops - helpers for ports with no usable hardware behind
 * them.  Each callback either does nothing or fails the request.
 */
static void ata_dummy_noret(struct ata_port *ap) { }
static int ata_dummy_ret0(struct ata_port *ap) { return 0; }
static void ata_dummy_qc_noret(struct ata_queued_cmd *qc) { }

/* pretend the (nonexistent) device is always ready */
static u8 ata_dummy_check_status(struct ata_port *ap)
{
	return ATA_DRDY;
}

/* refuse every command issued to a dummy port */
static unsigned int ata_dummy_qc_issue(struct ata_queued_cmd *qc)
{
	return AC_ERR_SYSTEM;
}
7494
/* port_ops built from the dummy callbacks above plus the generic
 * no-op helpers (ata_noop_dev_select, ata_noop_qc_prep).
 */
const struct ata_port_operations ata_dummy_port_ops = {
	.check_status = ata_dummy_check_status,
	.check_altstatus = ata_dummy_check_status,
	.dev_select = ata_noop_dev_select,
	.qc_prep = ata_noop_qc_prep,
	.qc_issue = ata_dummy_qc_issue,
	.freeze = ata_dummy_noret,
	.thaw = ata_dummy_noret,
	.error_handler = ata_dummy_noret,
	.post_internal_cmd = ata_dummy_qc_noret,
	.irq_clear = ata_dummy_noret,
	.port_start = ata_dummy_ret0,
	.port_stop = ata_dummy_noret,
};

/* port_info template LLDs can use for unoccupied/dummy ports */
const struct ata_port_info ata_dummy_port_info = {
	.port_ops = &ata_dummy_port_ops,
};
7513
1da177e4
LT
7514/*
7515 * libata is essentially a library of internal helper functions for
7516 * low-level ATA host controller drivers. As such, the API/ABI is
7517 * likely to change as new drivers are added and updated.
7518 * Do not depend on ABI/API stability.
7519 */
e9c83914
TH
7520EXPORT_SYMBOL_GPL(sata_deb_timing_normal);
7521EXPORT_SYMBOL_GPL(sata_deb_timing_hotplug);
7522EXPORT_SYMBOL_GPL(sata_deb_timing_long);
dd5b06c4 7523EXPORT_SYMBOL_GPL(ata_dummy_port_ops);
21b0ad4f 7524EXPORT_SYMBOL_GPL(ata_dummy_port_info);
1da177e4
LT
7525EXPORT_SYMBOL_GPL(ata_std_bios_param);
7526EXPORT_SYMBOL_GPL(ata_std_ports);
cca3974e 7527EXPORT_SYMBOL_GPL(ata_host_init);
f3187195 7528EXPORT_SYMBOL_GPL(ata_host_alloc);
f5cda257 7529EXPORT_SYMBOL_GPL(ata_host_alloc_pinfo);
ecef7253 7530EXPORT_SYMBOL_GPL(ata_host_start);
f3187195 7531EXPORT_SYMBOL_GPL(ata_host_register);
f5cda257 7532EXPORT_SYMBOL_GPL(ata_host_activate);
0529c159 7533EXPORT_SYMBOL_GPL(ata_host_detach);
1da177e4
LT
7534EXPORT_SYMBOL_GPL(ata_sg_init);
7535EXPORT_SYMBOL_GPL(ata_sg_init_one);
9a1004d0 7536EXPORT_SYMBOL_GPL(ata_hsm_move);
f686bcb8 7537EXPORT_SYMBOL_GPL(ata_qc_complete);
dedaf2b0 7538EXPORT_SYMBOL_GPL(ata_qc_complete_multiple);
1da177e4 7539EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
1da177e4
LT
7540EXPORT_SYMBOL_GPL(ata_tf_load);
7541EXPORT_SYMBOL_GPL(ata_tf_read);
7542EXPORT_SYMBOL_GPL(ata_noop_dev_select);
7543EXPORT_SYMBOL_GPL(ata_std_dev_select);
43727fbc 7544EXPORT_SYMBOL_GPL(sata_print_link_status);
1da177e4
LT
7545EXPORT_SYMBOL_GPL(ata_tf_to_fis);
7546EXPORT_SYMBOL_GPL(ata_tf_from_fis);
6357357c
TH
7547EXPORT_SYMBOL_GPL(ata_pack_xfermask);
7548EXPORT_SYMBOL_GPL(ata_unpack_xfermask);
7549EXPORT_SYMBOL_GPL(ata_xfer_mask2mode);
7550EXPORT_SYMBOL_GPL(ata_xfer_mode2mask);
7551EXPORT_SYMBOL_GPL(ata_xfer_mode2shift);
7552EXPORT_SYMBOL_GPL(ata_mode_string);
7553EXPORT_SYMBOL_GPL(ata_id_xfermask);
1da177e4
LT
7554EXPORT_SYMBOL_GPL(ata_check_status);
7555EXPORT_SYMBOL_GPL(ata_altstatus);
1da177e4
LT
7556EXPORT_SYMBOL_GPL(ata_exec_command);
7557EXPORT_SYMBOL_GPL(ata_port_start);
d92e74d3 7558EXPORT_SYMBOL_GPL(ata_sff_port_start);
1da177e4 7559EXPORT_SYMBOL_GPL(ata_interrupt);
04351821 7560EXPORT_SYMBOL_GPL(ata_do_set_mode);
0d5ff566
TH
7561EXPORT_SYMBOL_GPL(ata_data_xfer);
7562EXPORT_SYMBOL_GPL(ata_data_xfer_noirq);
31cc23b3 7563EXPORT_SYMBOL_GPL(ata_std_qc_defer);
1da177e4 7564EXPORT_SYMBOL_GPL(ata_qc_prep);
d26fc955 7565EXPORT_SYMBOL_GPL(ata_dumb_qc_prep);
e46834cd 7566EXPORT_SYMBOL_GPL(ata_noop_qc_prep);
1da177e4
LT
7567EXPORT_SYMBOL_GPL(ata_bmdma_setup);
7568EXPORT_SYMBOL_GPL(ata_bmdma_start);
7569EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
7570EXPORT_SYMBOL_GPL(ata_bmdma_status);
7571EXPORT_SYMBOL_GPL(ata_bmdma_stop);
6d97dbd7
TH
7572EXPORT_SYMBOL_GPL(ata_bmdma_freeze);
7573EXPORT_SYMBOL_GPL(ata_bmdma_thaw);
7574EXPORT_SYMBOL_GPL(ata_bmdma_drive_eh);
7575EXPORT_SYMBOL_GPL(ata_bmdma_error_handler);
7576EXPORT_SYMBOL_GPL(ata_bmdma_post_internal_cmd);
1da177e4 7577EXPORT_SYMBOL_GPL(ata_port_probe);
10305f0f 7578EXPORT_SYMBOL_GPL(ata_dev_disable);
3c567b7d 7579EXPORT_SYMBOL_GPL(sata_set_spd);
936fd732
TH
7580EXPORT_SYMBOL_GPL(sata_link_debounce);
7581EXPORT_SYMBOL_GPL(sata_link_resume);
1da177e4 7582EXPORT_SYMBOL_GPL(ata_bus_reset);
f5914a46 7583EXPORT_SYMBOL_GPL(ata_std_prereset);
c2bd5804 7584EXPORT_SYMBOL_GPL(ata_std_softreset);
cc0680a5 7585EXPORT_SYMBOL_GPL(sata_link_hardreset);
c2bd5804
TH
7586EXPORT_SYMBOL_GPL(sata_std_hardreset);
7587EXPORT_SYMBOL_GPL(ata_std_postreset);
2e9edbf8
JG
7588EXPORT_SYMBOL_GPL(ata_dev_classify);
7589EXPORT_SYMBOL_GPL(ata_dev_pair);
1da177e4 7590EXPORT_SYMBOL_GPL(ata_port_disable);
67846b30 7591EXPORT_SYMBOL_GPL(ata_ratelimit);
c22daff4 7592EXPORT_SYMBOL_GPL(ata_wait_register);
6f8b9958 7593EXPORT_SYMBOL_GPL(ata_busy_sleep);
88ff6eaf 7594EXPORT_SYMBOL_GPL(ata_wait_after_reset);
d4b2bab4 7595EXPORT_SYMBOL_GPL(ata_wait_ready);
86e45b6b 7596EXPORT_SYMBOL_GPL(ata_port_queue_task);
1da177e4
LT
7597EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
7598EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
1da177e4 7599EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
83c47bcb 7600EXPORT_SYMBOL_GPL(ata_scsi_slave_destroy);
a6e6ce8e 7601EXPORT_SYMBOL_GPL(ata_scsi_change_queue_depth);
1da177e4 7602EXPORT_SYMBOL_GPL(ata_host_intr);
34bf2170
TH
7603EXPORT_SYMBOL_GPL(sata_scr_valid);
7604EXPORT_SYMBOL_GPL(sata_scr_read);
7605EXPORT_SYMBOL_GPL(sata_scr_write);
7606EXPORT_SYMBOL_GPL(sata_scr_write_flush);
936fd732
TH
7607EXPORT_SYMBOL_GPL(ata_link_online);
7608EXPORT_SYMBOL_GPL(ata_link_offline);
6ffa01d8 7609#ifdef CONFIG_PM
cca3974e
JG
7610EXPORT_SYMBOL_GPL(ata_host_suspend);
7611EXPORT_SYMBOL_GPL(ata_host_resume);
6ffa01d8 7612#endif /* CONFIG_PM */
6a62a04d
TH
7613EXPORT_SYMBOL_GPL(ata_id_string);
7614EXPORT_SYMBOL_GPL(ata_id_c_string);
1da177e4
LT
7615EXPORT_SYMBOL_GPL(ata_scsi_simulate);
7616
1bc4ccff 7617EXPORT_SYMBOL_GPL(ata_pio_need_iordy);
6357357c 7618EXPORT_SYMBOL_GPL(ata_timing_find_mode);
452503f9
AC
7619EXPORT_SYMBOL_GPL(ata_timing_compute);
7620EXPORT_SYMBOL_GPL(ata_timing_merge);
7621
1da177e4
LT
7622#ifdef CONFIG_PCI
7623EXPORT_SYMBOL_GPL(pci_test_config_bits);
d583bc18 7624EXPORT_SYMBOL_GPL(ata_pci_init_sff_host);
1626aeb8 7625EXPORT_SYMBOL_GPL(ata_pci_init_bmdma);
d583bc18 7626EXPORT_SYMBOL_GPL(ata_pci_prepare_sff_host);
1da177e4
LT
7627EXPORT_SYMBOL_GPL(ata_pci_init_one);
7628EXPORT_SYMBOL_GPL(ata_pci_remove_one);
6ffa01d8 7629#ifdef CONFIG_PM
500530f6
TH
7630EXPORT_SYMBOL_GPL(ata_pci_device_do_suspend);
7631EXPORT_SYMBOL_GPL(ata_pci_device_do_resume);
9b847548
JA
7632EXPORT_SYMBOL_GPL(ata_pci_device_suspend);
7633EXPORT_SYMBOL_GPL(ata_pci_device_resume);
6ffa01d8 7634#endif /* CONFIG_PM */
67951ade
AC
7635EXPORT_SYMBOL_GPL(ata_pci_default_filter);
7636EXPORT_SYMBOL_GPL(ata_pci_clear_simplex);
1da177e4 7637#endif /* CONFIG_PCI */
9b847548 7638
31f88384 7639EXPORT_SYMBOL_GPL(sata_pmp_qc_defer_cmd_switch);
3af9a77a
TH
7640EXPORT_SYMBOL_GPL(sata_pmp_std_prereset);
7641EXPORT_SYMBOL_GPL(sata_pmp_std_hardreset);
7642EXPORT_SYMBOL_GPL(sata_pmp_std_postreset);
7643EXPORT_SYMBOL_GPL(sata_pmp_do_eh);
7644
b64bbc39
TH
7645EXPORT_SYMBOL_GPL(__ata_ehi_push_desc);
7646EXPORT_SYMBOL_GPL(ata_ehi_push_desc);
7647EXPORT_SYMBOL_GPL(ata_ehi_clear_desc);
cbcdd875
TH
7648EXPORT_SYMBOL_GPL(ata_port_desc);
7649#ifdef CONFIG_PCI
7650EXPORT_SYMBOL_GPL(ata_port_pbar_desc);
7651#endif /* CONFIG_PCI */
7b70fc03 7652EXPORT_SYMBOL_GPL(ata_port_schedule_eh);
dbd82616 7653EXPORT_SYMBOL_GPL(ata_link_abort);
7b70fc03 7654EXPORT_SYMBOL_GPL(ata_port_abort);
e3180499 7655EXPORT_SYMBOL_GPL(ata_port_freeze);
7d77b247 7656EXPORT_SYMBOL_GPL(sata_async_notification);
e3180499
TH
7657EXPORT_SYMBOL_GPL(ata_eh_freeze_port);
7658EXPORT_SYMBOL_GPL(ata_eh_thaw_port);
ece1d636
TH
7659EXPORT_SYMBOL_GPL(ata_eh_qc_complete);
7660EXPORT_SYMBOL_GPL(ata_eh_qc_retry);
022bdb07 7661EXPORT_SYMBOL_GPL(ata_do_eh);
83625006 7662EXPORT_SYMBOL_GPL(ata_irq_on);
a619f981 7663EXPORT_SYMBOL_GPL(ata_dev_try_classify);
be0d18df
AC
7664
7665EXPORT_SYMBOL_GPL(ata_cable_40wire);
7666EXPORT_SYMBOL_GPL(ata_cable_80wire);
7667EXPORT_SYMBOL_GPL(ata_cable_unknown);
7668EXPORT_SYMBOL_GPL(ata_cable_sata);