/*
   libata-core.c - helper library for ATA

   Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
   Copyright 2003-2004 Jeff Garzik

   The contents of this file are subject to the Open
   Software License version 1.1 that can be found at
   http://www.opensource.org/licenses/osl-1.1.txt and is included herein
   by reference.

   Alternatively, the contents of this file may be used under the terms
   of the GNU General Public License version 2 (the "GPL") as distributed
   in the kernel source COPYING file, in which case the provisions of
   the GPL are applicable instead of the above.  If you wish to allow
   the use of your version of this file only under the terms of the
   GPL and not to allow others to use your version of this file under
   the OSL, indicate your decision by deleting the provisions above and
   replace them with the notice and other provisions required by the GPL.
   If you do not delete the provisions above, a recipient may use your
   version of this file under either the OSL or the GPL.

 */

#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/workqueue.h>
#include <scsi/scsi.h>
#include "scsi.h"
#include "scsi_priv.h"
#include <scsi/scsi_host.h>
#include <linux/libata.h>
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/byteorder.h>

#include "libata.h"

static unsigned int ata_busy_sleep(struct ata_port *ap,
				   unsigned long tmout_pat,
				   unsigned long tmout);
static void ata_set_mode(struct ata_port *ap);
static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev);
static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift);
static int fgb(u32 bitmap);
static int ata_choose_xfer_mode(struct ata_port *ap,
				u8 *xfer_mode_out,
				unsigned int *xfer_shift_out);
static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat);
static void __ata_qc_complete(struct ata_queued_cmd *qc);

static unsigned int ata_unique_id = 1;
static struct workqueue_struct *ata_wq;

MODULE_AUTHOR("Jeff Garzik");
MODULE_DESCRIPTION("Library module for ATA devices");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

/**
 * ata_tf_load_pio - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using PIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_load_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		outb(tf->ctl, ioaddr->ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		outb(tf->hob_feature, ioaddr->feature_addr);
		outb(tf->hob_nsect, ioaddr->nsect_addr);
		outb(tf->hob_lbal, ioaddr->lbal_addr);
		outb(tf->hob_lbam, ioaddr->lbam_addr);
		outb(tf->hob_lbah, ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		outb(tf->feature, ioaddr->feature_addr);
		outb(tf->nsect, ioaddr->nsect_addr);
		outb(tf->lbal, ioaddr->lbal_addr);
		outb(tf->lbam, ioaddr->lbam_addr);
		outb(tf->lbah, ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		outb(tf->device, ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

/**
 * ata_tf_load_mmio - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

	if (tf->ctl != ap->last_ctl) {
		writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
		ap->last_ctl = tf->ctl;
		ata_wait_idle(ap);
	}

	if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
		writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
			tf->hob_feature,
			tf->hob_nsect,
			tf->hob_lbal,
			tf->hob_lbam,
			tf->hob_lbah);
	}

	if (is_addr) {
		writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
		writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
		writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
		writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
		writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
		VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
			tf->feature,
			tf->nsect,
			tf->lbal,
			tf->lbam,
			tf->lbah);
	}

	if (tf->flags & ATA_TFLAG_DEVICE) {
		writeb(tf->device, (void __iomem *) ioaddr->device_addr);
		VPRINTK("device 0x%X\n", tf->device);
	}

	ata_wait_idle(ap);
}

void ata_tf_load(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_load_mmio(ap, tf);
	else
		ata_tf_load_pio(ap, tf);
}

/**
 * ata_exec_command_pio - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues PIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	outb(tf->command, ap->ioaddr.command_addr);
	ata_pause(ap);
}


/**
 * ata_exec_command_mmio - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues MMIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

	writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
	ata_pause(ap);
}

void ata_exec_command(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_exec_command_mmio(ap, tf);
	else
		ata_exec_command_pio(ap, tf);
}

/**
 * ata_exec - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues PIO/MMIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * Obtains host_set lock.
 */

static inline void ata_exec(struct ata_port *ap, struct ata_taskfile *tf)
{
	unsigned long flags;

	DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);
	spin_lock_irqsave(&ap->host_set->lock, flags);
	ap->ops->exec_command(ap, tf);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);
}

/**
 * ata_tf_to_host - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * Obtains host_set lock.
 */

static void ata_tf_to_host(struct ata_port *ap, struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);

	ata_exec(ap, tf);
}

/**
 * ata_tf_to_host_nolock - issue ATA taskfile to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues ATA taskfile register set to ATA host controller,
 * with proper synchronization with interrupt handler and
 * other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

void ata_tf_to_host_nolock(struct ata_port *ap, struct ata_taskfile *tf)
{
	ap->ops->tf_load(ap, tf);
	ap->ops->exec_command(ap, tf);
}

/**
 * ata_tf_read_pio - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = inb(ioaddr->nsect_addr);
	tf->lbal = inb(ioaddr->lbal_addr);
	tf->lbam = inb(ioaddr->lbam_addr);
	tf->lbah = inb(ioaddr->lbah_addr);
	tf->device = inb(ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
		tf->hob_feature = inb(ioaddr->error_addr);
		tf->hob_nsect = inb(ioaddr->nsect_addr);
		tf->hob_lbal = inb(ioaddr->lbal_addr);
		tf->hob_lbam = inb(ioaddr->lbam_addr);
		tf->hob_lbah = inb(ioaddr->lbah_addr);
	}
}

/**
 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf via MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
	tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
	tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
	tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
	tf->device = readb((void __iomem *)ioaddr->device_addr);

	if (tf->flags & ATA_TFLAG_LBA48) {
		writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
		tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
		tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
		tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
		tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
		tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
	}
}

void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
	if (ap->flags & ATA_FLAG_MMIO)
		ata_tf_read_mmio(ap, tf);
	else
		ata_tf_read_pio(ap, tf);
}

/**
 * ata_check_status_pio - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads the ATA taskfile status register for the currently-selected
 * device and returns its value. This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
	return inb(ap->ioaddr.status_addr);
}

/**
 * ata_check_status_mmio - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads the ATA taskfile status register for the currently-selected
 * device via MMIO and returns its value. This also clears pending
 * interrupts from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
	return readb((void __iomem *) ap->ioaddr.status_addr);
}

u8 ata_check_status(struct ata_port *ap)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_check_status_mmio(ap);
	return ata_check_status_pio(ap);
}

u8 ata_altstatus(struct ata_port *ap)
{
	if (ap->ops->check_altstatus)
		return ap->ops->check_altstatus(ap);

	if (ap->flags & ATA_FLAG_MMIO)
		return readb((void __iomem *)ap->ioaddr.altstatus_addr);
	return inb(ap->ioaddr.altstatus_addr);
}

u8 ata_chk_err(struct ata_port *ap)
{
	if (ap->ops->check_err)
		return ap->ops->check_err(ap);

	if (ap->flags & ATA_FLAG_MMIO) {
		return readb((void __iomem *) ap->ioaddr.error_addr);
	}
	return inb(ap->ioaddr.error_addr);
}

/**
 * ata_tf_to_fis - Convert ATA taskfile to SATA FIS structure
 * @tf: Taskfile to convert
 * @fis: Buffer into which data will be output
 * @pmp: Port multiplier port
 *
 * Converts a standard ATA taskfile to a Serial ATA
 * FIS structure (Register - Host to Device).
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_to_fis(struct ata_taskfile *tf, u8 *fis, u8 pmp)
{
	fis[0] = 0x27;			/* Register - Host to Device FIS */
	fis[1] = (pmp & 0xf) | (1 << 7); /* Port multiplier number,
					    bit 7 indicates Command FIS */
	fis[2] = tf->command;
	fis[3] = tf->feature;

	fis[4] = tf->lbal;
	fis[5] = tf->lbam;
	fis[6] = tf->lbah;
	fis[7] = tf->device;

	fis[8] = tf->hob_lbal;
	fis[9] = tf->hob_lbam;
	fis[10] = tf->hob_lbah;
	fis[11] = tf->hob_feature;

	fis[12] = tf->nsect;
	fis[13] = tf->hob_nsect;
	fis[14] = 0;
	fis[15] = tf->ctl;

	fis[16] = 0;
	fis[17] = 0;
	fis[18] = 0;
	fis[19] = 0;
}
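
/*
 * Illustrative layout of the 20-byte FIS built above (taskfile fields
 * in parentheses):
 *
 *	byte  0:         0x27 (Register - Host to Device FIS type)
 *	byte  1:         pmp | 0x80 (bit 7 marks this as a Command FIS)
 *	bytes 2-3:       command, feature
 *	bytes 4-7:       lbal, lbam, lbah, device
 *	bytes 8-11:      hob_lbal, hob_lbam, hob_lbah, hob_feature
 *	bytes 12-13:     nsect, hob_nsect
 *	byte  15:        ctl
 *	bytes 14, 16-19: reserved, zeroed
 */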

/**
 * ata_tf_from_fis - Convert SATA FIS to ATA taskfile
 * @fis: Buffer from which data will be input
 * @tf: Taskfile to output
 *
 * Converts a Serial ATA FIS structure (Register - Device to Host)
 * to a standard ATA taskfile.
 *
 * LOCKING:
 * Inherited from caller.
 */

void ata_tf_from_fis(u8 *fis, struct ata_taskfile *tf)
{
	tf->command = fis[2];	/* status */
	tf->feature = fis[3];	/* error */

	tf->lbal = fis[4];
	tf->lbam = fis[5];
	tf->lbah = fis[6];
	tf->device = fis[7];

	tf->hob_lbal = fis[8];
	tf->hob_lbam = fis[9];
	tf->hob_lbah = fis[10];

	tf->nsect = fis[12];
	tf->hob_nsect = fis[13];
}

/**
 * ata_prot_to_cmd - determine which read/write opcodes to use
 * @protocol: ATA_PROT_xxx taskfile protocol
 * @lba48: true if lba48 is present
 *
 * Given necessary input, determine which read/write commands
 * to use to transfer data.
 *
 * LOCKING:
 * None.
 */
static int ata_prot_to_cmd(int protocol, int lba48)
{
	int rcmd = 0, wcmd = 0;

	switch (protocol) {
	case ATA_PROT_PIO:
		if (lba48) {
			rcmd = ATA_CMD_PIO_READ_EXT;
			wcmd = ATA_CMD_PIO_WRITE_EXT;
		} else {
			rcmd = ATA_CMD_PIO_READ;
			wcmd = ATA_CMD_PIO_WRITE;
		}
		break;

	case ATA_PROT_DMA:
		if (lba48) {
			rcmd = ATA_CMD_READ_EXT;
			wcmd = ATA_CMD_WRITE_EXT;
		} else {
			rcmd = ATA_CMD_READ;
			wcmd = ATA_CMD_WRITE;
		}
		break;

	default:
		return -1;
	}

	return rcmd | (wcmd << 8);
}
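
/*
 * Illustrative example: the opcodes come back packed into one int,
 * read command in the low byte, write command in the high byte.
 * For ATA_PROT_DMA without lba48:
 *
 *	int cmd = ata_prot_to_cmd(ATA_PROT_DMA, 0);
 *	u8 read_cmd  = cmd & 0xff;		(== ATA_CMD_READ)
 *	u8 write_cmd = (cmd >> 8) & 0xff;	(== ATA_CMD_WRITE)
 *
 * ata_dev_set_protocol() below unpacks the value exactly this way.
 */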

/**
 * ata_dev_set_protocol - set taskfile protocol and r/w commands
 * @dev: device to examine and configure
 *
 * Examine the device configuration, after we have
 * read the identify-device page and configured the
 * data transfer mode.  Set internal state related to
 * the ATA taskfile protocol (pio, pio mult, dma, etc.)
 * and calculate the proper read/write commands to use.
 *
 * LOCKING:
 * caller.
 */
static void ata_dev_set_protocol(struct ata_device *dev)
{
	int pio = (dev->flags & ATA_DFLAG_PIO);
	int lba48 = (dev->flags & ATA_DFLAG_LBA48);
	int proto, cmd;

	if (pio)
		proto = dev->xfer_protocol = ATA_PROT_PIO;
	else
		proto = dev->xfer_protocol = ATA_PROT_DMA;

	cmd = ata_prot_to_cmd(proto, lba48);
	if (cmd < 0)
		BUG();

	dev->read_cmd = cmd & 0xff;
	dev->write_cmd = (cmd >> 8) & 0xff;
}

static const char * xfer_mode_str[] = {
	"UDMA/16",
	"UDMA/25",
	"UDMA/33",
	"UDMA/44",
	"UDMA/66",
	"UDMA/100",
	"UDMA/133",
	"UDMA7",
	"MWDMA0",
	"MWDMA1",
	"MWDMA2",
	"PIO0",
	"PIO1",
	"PIO2",
	"PIO3",
	"PIO4",
};

/**
 * ata_mode_string - convert xfer mode bitmask to string
 * @mask: mask of bits supported; only highest bit counts.
 *
 * Determine string which represents the highest speed
 * (highest bit in @mask).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Constant C string representing highest speed listed in
 * @mask, or the constant C string "<n/a>".
 */

static const char *ata_mode_string(unsigned int mask)
{
	int i;

	for (i = 7; i >= 0; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_MWDMA + 2; i >= ATA_SHIFT_MWDMA; i--)
		if (mask & (1 << i))
			goto out;
	for (i = ATA_SHIFT_PIO + 4; i >= ATA_SHIFT_PIO; i--)
		if (mask & (1 << i))
			goto out;

	return "<n/a>";

out:
	return xfer_mode_str[i];
}
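
/*
 * Illustrative example: UDMA modes occupy bits 0-7 of @mask and map
 * directly onto the first eight xfer_mode_str[] entries, so a drive
 * reporting UDMA modes 0-5 (mask 0x3f) has bit 5 as its highest set
 * bit and ata_mode_string(0x3f) returns "UDMA/100".  The MWDMA and
 * PIO loops likewise assume ATA_SHIFT_MWDMA and ATA_SHIFT_PIO from
 * <linux/libata.h> line up with the table indices above.
 */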

/**
 * ata_pio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_pio_devchk(struct ata_port *ap,
				   unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	outb(0xaa, ioaddr->nsect_addr);
	outb(0x55, ioaddr->lbal_addr);

	outb(0x55, ioaddr->nsect_addr);
	outb(0xaa, ioaddr->lbal_addr);

	nsect = inb(ioaddr->nsect_addr);
	lbal = inb(ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_mmio_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * This technique was originally described in
 * Hale Landis's ATADRVR (www.ata-atapi.com), and
 * later found its way into the ATA/ATAPI spec.
 *
 * Write a pattern to the ATA shadow registers,
 * and if a device is present, it will respond by
 * correctly storing and echoing back the
 * ATA shadow register contents.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_mmio_devchk(struct ata_port *ap,
				    unsigned int device)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	u8 nsect, lbal;

	ap->ops->dev_select(ap, device);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	writeb(0xaa, (void __iomem *) ioaddr->nsect_addr);
	writeb(0x55, (void __iomem *) ioaddr->lbal_addr);

	writeb(0x55, (void __iomem *) ioaddr->nsect_addr);
	writeb(0xaa, (void __iomem *) ioaddr->lbal_addr);

	nsect = readb((void __iomem *) ioaddr->nsect_addr);
	lbal = readb((void __iomem *) ioaddr->lbal_addr);

	if ((nsect == 0x55) && (lbal == 0xaa))
		return 1;	/* we found a device */

	return 0;		/* nothing found */
}

/**
 * ata_devchk - PATA device presence detection
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * Dispatch ATA device presence detection, depending
 * on whether we are using PIO or MMIO to talk to the
 * ATA shadow registers.
 *
 * LOCKING:
 * caller.
 */

static unsigned int ata_devchk(struct ata_port *ap,
			       unsigned int device)
{
	if (ap->flags & ATA_FLAG_MMIO)
		return ata_mmio_devchk(ap, device);
	return ata_pio_devchk(ap, device);
}

/**
 * ata_dev_classify - determine device type based on ATA-spec signature
 * @tf: ATA taskfile register set for device to be identified
 *
 * Determine from taskfile register contents whether a device is
 * ATA or ATAPI, as per "Signature and persistence" section
 * of ATA/PI spec (volume 1, sect 5.14).
 *
 * LOCKING:
 * None.
 *
 * RETURNS:
 * Device type, %ATA_DEV_ATA, %ATA_DEV_ATAPI, or %ATA_DEV_UNKNOWN
 * in the event of failure.
 */

unsigned int ata_dev_classify(struct ata_taskfile *tf)
{
	/* Apple's open source Darwin code hints that some devices only
	 * put a proper signature into the LBA mid/high registers,
	 * so we only check those.  It's sufficient for uniqueness.
	 */

	if (((tf->lbam == 0) && (tf->lbah == 0)) ||
	    ((tf->lbam == 0x3c) && (tf->lbah == 0xc3))) {
		DPRINTK("found ATA device by sig\n");
		return ATA_DEV_ATA;
	}

	if (((tf->lbam == 0x14) && (tf->lbah == 0xeb)) ||
	    ((tf->lbam == 0x69) && (tf->lbah == 0x96))) {
		DPRINTK("found ATAPI device by sig\n");
		return ATA_DEV_ATAPI;
	}

	DPRINTK("unknown device\n");
	return ATA_DEV_UNKNOWN;
}
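
/*
 * The signature values checked above, for reference:
 *
 *	lbam  lbah	class
 *	0x00  0x00	ATA
 *	0x3c  0xc3	ATA
 *	0x14  0xeb	ATAPI
 *	0x69  0x96	ATAPI
 *
 * Anything else is reported as ATA_DEV_UNKNOWN.
 */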

/**
 * ata_dev_try_classify - Parse returned ATA device signature
 * @ap: ATA channel to examine
 * @device: Device to examine (starting at zero)
 *
 * After an event -- SRST, E.D.D., or SATA COMRESET -- occurs,
 * an ATA/ATAPI-defined set of values is placed in the ATA
 * shadow registers, indicating the results of device detection
 * and diagnostics.
 *
 * Select the ATA device, and read the values from the ATA shadow
 * registers.  Then parse according to the Error register value,
 * and the spec-defined values examined by ata_dev_classify().
 *
 * LOCKING:
 * caller.
 */

static u8 ata_dev_try_classify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	struct ata_taskfile tf;
	unsigned int class;
	u8 err;

	ap->ops->dev_select(ap, device);

	memset(&tf, 0, sizeof(tf));

	err = ata_chk_err(ap);
	ap->ops->tf_read(ap, &tf);

	dev->class = ATA_DEV_NONE;

	/* see if device passed diags */
	if (err == 1)
		/* do nothing */ ;
	else if ((device == 0) && (err == 0x81))
		/* do nothing */ ;
	else
		return err;

	/* determine if device is ATA or ATAPI */
	class = ata_dev_classify(&tf);
	if (class == ATA_DEV_UNKNOWN)
		return err;
	if ((class == ATA_DEV_ATA) && (ata_chk_status(ap) == 0))
		return err;

	dev->class = class;

	return err;
}

/**
 * ata_dev_id_string - Convert IDENTIFY DEVICE page into string
 * @id: IDENTIFY DEVICE results we will examine
 * @s: string into which data is output
 * @ofs: offset into identify device page
 * @len: length of string to return. Must be an even number.
 *
 * The strings in the IDENTIFY DEVICE page are broken up into
 * 16-bit chunks.  Run through the string, and output each
 * 8-bit chunk linearly, regardless of platform.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_id_string(u16 *id, unsigned char *s,
		       unsigned int ofs, unsigned int len)
{
	unsigned int c;

	while (len > 0) {
		c = id[ofs] >> 8;
		*s = c;
		s++;

		c = id[ofs] & 0xff;
		*s = c;
		s++;

		ofs++;
		len -= 2;
	}
}
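
/*
 * Illustrative example: IDENTIFY strings store two characters per
 * 16-bit word, first character in the high byte.  A word holding
 * 0x4845 therefore yields 'H' (0x48) followed by 'E' (0x45), so the
 * loop above emits the characters in their original order regardless
 * of host endianness (the id[] buffer having already been byte-swapped
 * to CPU order by swap_buf_le16()).
 */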

void ata_noop_dev_select(struct ata_port *ap, unsigned int device)
{
}

/**
 * ata_std_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * LOCKING:
 * caller.
 */

void ata_std_dev_select(struct ata_port *ap, unsigned int device)
{
	u8 tmp;

	if (device == 0)
		tmp = ATA_DEVICE_OBS;
	else
		tmp = ATA_DEVICE_OBS | ATA_DEV1;

	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(tmp, (void __iomem *) ap->ioaddr.device_addr);
	} else {
		outb(tmp, ap->ioaddr.device_addr);
	}
	ata_pause(ap);	/* needed; also flushes, for mmio */
}

/**
 * ata_dev_select - Select device 0/1 on ATA bus
 * @ap: ATA channel to manipulate
 * @device: ATA device (numbered from zero) to select
 * @wait: non-zero to wait for Status register BSY bit to clear
 * @can_sleep: non-zero if context allows sleeping
 *
 * Use the method defined in the ATA specification to
 * make either device 0, or device 1, active on the
 * ATA channel.
 *
 * This is a high-level version of ata_std_dev_select(),
 * which additionally provides the services of inserting
 * the proper pauses and status polling, where needed.
 *
 * LOCKING:
 * caller.
 */

void ata_dev_select(struct ata_port *ap, unsigned int device,
		    unsigned int wait, unsigned int can_sleep)
{
	VPRINTK("ENTER, ata%u: device %u, wait %u\n",
		ap->id, device, wait);

	if (wait)
		ata_wait_idle(ap);

	ap->ops->dev_select(ap, device);

	if (wait) {
		if (can_sleep && ap->device[device].class == ATA_DEV_ATAPI)
			msleep(150);
		ata_wait_idle(ap);
	}
}

/**
 * ata_dump_id - IDENTIFY DEVICE info debugging output
 * @dev: Device whose IDENTIFY DEVICE page we will dump
 *
 * Dump selected 16-bit words from a detected device's
 * IDENTIFY DEVICE page.
 *
 * LOCKING:
 * caller.
 */

static inline void ata_dump_id(struct ata_device *dev)
{
	DPRINTK("49==0x%04x  "
		"53==0x%04x  "
		"63==0x%04x  "
		"64==0x%04x  "
		"75==0x%04x\n",
		dev->id[49],
		dev->id[53],
		dev->id[63],
		dev->id[64],
		dev->id[75]);
	DPRINTK("80==0x%04x  "
		"81==0x%04x  "
		"82==0x%04x  "
		"83==0x%04x  "
		"84==0x%04x\n",
		dev->id[80],
		dev->id[81],
		dev->id[82],
		dev->id[83],
		dev->id[84]);
	DPRINTK("88==0x%04x  "
		"93==0x%04x\n",
		dev->id[88],
		dev->id[93]);
}

/**
 * ata_dev_identify - obtain IDENTIFY [PACKET] DEVICE page
 * @ap: port on which device we wish to probe resides
 * @device: device bus address, starting at zero
 *
 * Following bus reset, we issue the IDENTIFY [PACKET] DEVICE
 * command, and read back the 512-byte device information page.
 * The device information page is fed to us via the standard
 * PIO-IN protocol, but we hand-code it here. (TODO: investigate
 * using standard PIO-IN paths)
 *
 * After reading the device information page, we use several
 * bits of information from it to initialize data structures
 * that will be used during the lifetime of the ata_device.
 * Other data from the info page is used to disqualify certain
 * older ATA devices we do not wish to support.
 *
 * LOCKING:
 * Inherited from caller.  Some functions called by this function
 * obtain the host_set lock.
 */

static void ata_dev_identify(struct ata_port *ap, unsigned int device)
{
	struct ata_device *dev = &ap->device[device];
	unsigned int i;
	u16 tmp;
	unsigned long xfer_modes;
	u8 status;
	unsigned int using_edd;
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	if (!ata_dev_present(dev)) {
		DPRINTK("ENTER/EXIT (host %u, dev %u) -- nodev\n",
			ap->id, device);
		return;
	}

	if (ap->flags & (ATA_FLAG_SRST | ATA_FLAG_SATA_RESET))
		using_edd = 0;
	else
		using_edd = 1;

	DPRINTK("ENTER, host %u, dev %u\n", ap->id, device);

	assert(dev->class == ATA_DEV_ATA || dev->class == ATA_DEV_ATAPI ||
	       dev->class == ATA_DEV_NONE);

	ata_dev_select(ap, device, 1, 1); /* select device 0/1 */

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	ata_sg_init_one(qc, dev->id, sizeof(dev->id));
	qc->dma_dir = DMA_FROM_DEVICE;
	qc->tf.protocol = ATA_PROT_PIO;
	qc->nsect = 1;

retry:
	if (dev->class == ATA_DEV_ATA) {
		qc->tf.command = ATA_CMD_ID_ATA;
		DPRINTK("do ATA identify\n");
	} else {
		qc->tf.command = ATA_CMD_ID_ATAPI;
		DPRINTK("do ATAPI identify\n");
	}

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		goto err_out;
	else
		wait_for_completion(&wait);

	status = ata_chk_status(ap);
	if (status & ATA_ERR) {
		/*
		 * arg!  EDD works for all test cases, but seems to return
		 * the ATA signature for some ATAPI devices.  Until the
		 * reason for this is found and fixed, we fix up the mess
		 * here.  If IDENTIFY DEVICE returns command aborted
		 * (as ATAPI devices do), then we issue an
		 * IDENTIFY PACKET DEVICE.
		 *
		 * ATA software reset (SRST, the default) does not appear
		 * to have this problem.
		 */
		if ((using_edd) && (qc->tf.command == ATA_CMD_ID_ATA)) {
			u8 err = ata_chk_err(ap);
			if (err & ATA_ABORTED) {
				dev->class = ATA_DEV_ATAPI;
				qc->cursg = 0;
				qc->cursg_ofs = 0;
				qc->cursect = 0;
				qc->nsect = 1;
				goto retry;
			}
		}
		goto err_out;
	}

	swap_buf_le16(dev->id, ATA_ID_WORDS);

	/* print device capabilities */
	printk(KERN_DEBUG "ata%u: dev %u cfg "
	       "49:%04x 82:%04x 83:%04x 84:%04x 85:%04x 86:%04x 87:%04x 88:%04x\n",
	       ap->id, device, dev->id[49],
	       dev->id[82], dev->id[83], dev->id[84],
	       dev->id[85], dev->id[86], dev->id[87],
	       dev->id[88]);

	/*
	 * common ATA, ATAPI feature tests
	 */

	/* we require LBA and DMA support (bits 8 & 9 of word 49) */
	if (!ata_id_has_dma(dev->id) || !ata_id_has_lba(dev->id)) {
		printk(KERN_DEBUG "ata%u: no dma/lba\n", ap->id);
		goto err_out_nosup;
	}

	/* quick-n-dirty find max transfer mode; for printk only */
	xfer_modes = dev->id[ATA_ID_UDMA_MODES];
	if (!xfer_modes)
		xfer_modes = (dev->id[ATA_ID_MWDMA_MODES]) << ATA_SHIFT_MWDMA;
	if (!xfer_modes) {
		xfer_modes = (dev->id[ATA_ID_PIO_MODES]) << (ATA_SHIFT_PIO + 3);
		xfer_modes |= (0x7 << ATA_SHIFT_PIO);
	}

	ata_dump_id(dev);

	/* ATA-specific feature tests */
	if (dev->class == ATA_DEV_ATA) {
		if (!ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		tmp = dev->id[ATA_ID_MAJOR_VER];
		for (i = 14; i >= 1; i--)
			if (tmp & (1 << i))
				break;

		/* we require at least ATA-3 */
		if (i < 3) {
			printk(KERN_DEBUG "ata%u: no ATA-3\n", ap->id);
			goto err_out_nosup;
		}

		if (ata_id_has_lba48(dev->id)) {
			dev->flags |= ATA_DFLAG_LBA48;
			dev->n_sectors = ata_id_u64(dev->id, 100);
		} else {
			dev->n_sectors = ata_id_u32(dev->id, 60);
		}

		ap->host->max_cmd_len = 16;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATA, max %s, %Lu sectors:%s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes),
		       (unsigned long long)dev->n_sectors,
		       dev->flags & ATA_DFLAG_LBA48 ? " lba48" : "");
	}

	/* ATAPI-specific feature tests */
	else {
		if (ata_id_is_ata(dev->id))	/* sanity check */
			goto err_out_nosup;

		rc = atapi_cdb_len(dev->id);
		if ((rc < 12) || (rc > ATAPI_CDB_LEN)) {
			printk(KERN_WARNING "ata%u: unsupported CDB len\n", ap->id);
			goto err_out_nosup;
		}
		ap->cdb_len = (unsigned int) rc;
		ap->host->max_cmd_len = (unsigned char) ap->cdb_len;

		/* print device info to dmesg */
		printk(KERN_INFO "ata%u: dev %u ATAPI, max %s\n",
		       ap->id, device,
		       ata_mode_string(xfer_modes));
	}

	DPRINTK("EXIT, drv_stat = 0x%x\n", ata_chk_status(ap));
	return;

err_out_nosup:
	printk(KERN_WARNING "ata%u: dev %u not supported, ignoring\n",
	       ap->id, device);
err_out:
	dev->class++;	/* converts ATA_DEV_xxx into ATA_DEV_xxx_UNSUP */
	DPRINTK("EXIT, err\n");
}

/**
 * ata_bus_probe - Reset and probe ATA bus
 * @ap: Bus to probe
 *
 * LOCKING:
 *
 * RETURNS:
 * Zero on success, non-zero on error.
 */

static int ata_bus_probe(struct ata_port *ap)
{
	unsigned int i, found = 0;

	ap->ops->phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		ata_dev_identify(ap, i);
		if (ata_dev_present(&ap->device[i])) {
			found = 1;
			if (ap->ops->dev_config)
				ap->ops->dev_config(ap, &ap->device[i]);
		}
	}

	if ((!found) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		goto err_out_disable;

	ata_set_mode(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		goto err_out_disable;

	return 0;

err_out_disable:
	ap->ops->port_disable(ap);
err_out:
	return -1;
}

/**
 * ata_port_probe - Mark port as enabled
 * @ap: Port for which we indicate enablement
 *
 * Clear ATA_FLAG_PORT_DISABLED on @ap, making the port
 * eligible for device probing.
 *
 * LOCKING:
 * None.
 */

void ata_port_probe(struct ata_port *ap)
{
	ap->flags &= ~ATA_FLAG_PORT_DISABLED;
}

/**
 * __sata_phy_reset - Wake/reset a low-level SATA PHY
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function issues commands to standard SATA Sxxx
 * PHY registers, to wake up the phy (and device), and
 * clear any reset condition.
 *
 * LOCKING: None.  Serialized during ata_bus_probe().
 *
 */
void __sata_phy_reset(struct ata_port *ap)
{
	u32 sstatus;
	unsigned long timeout = jiffies + (HZ * 5);

	if (ap->flags & ATA_FLAG_SATA_RESET) {
		/* issue phy wake/reset */
		scr_write_flush(ap, SCR_CONTROL, 0x301);
		udelay(400);	/* FIXME: a guess */
	}
	scr_write_flush(ap, SCR_CONTROL, 0x300); /* phy wake/clear reset */

	/* wait for phy to become ready, if necessary */
	do {
		msleep(200);
		sstatus = scr_read(ap, SCR_STATUS);
		if ((sstatus & 0xf) != 1)
			break;
	} while (time_before(jiffies, timeout));

	/* TODO: phy layer with polling, timeouts, etc. */
	if (sata_dev_present(ap))
		ata_port_probe(ap);
	else {
		sstatus = scr_read(ap, SCR_STATUS);
		printk(KERN_INFO "ata%u: no device found (phy stat %08x)\n",
		       ap->id, sstatus);
		ata_port_disable(ap);
	}

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT)) {
		ata_port_disable(ap);
		return;
	}

	ap->cbl = ATA_CBL_SATA;
}

/**
 * sata_phy_reset - Reset SATA bus.
 * @ap: SATA port associated with target SATA PHY.
 *
 * This function resets the SATA bus, and then probes
 * the bus for devices.
 *
 * LOCKING: None.  Serialized during ata_bus_probe().
 *
 */
void sata_phy_reset(struct ata_port *ap)
{
	__sata_phy_reset(ap);
	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;
	ata_bus_reset(ap);
}

/**
 * ata_port_disable - Disable port.
 * @ap: Port to be disabled.
 *
 * Modify @ap data structure such that the system
 * thinks that the entire port is disabled, and should
 * never attempt to probe or communicate with devices
 * on this port.
 *
 * LOCKING: host_set lock, or some other form of
 * serialization.
 */

void ata_port_disable(struct ata_port *ap)
{
	ap->device[0].class = ATA_DEV_NONE;
	ap->device[1].class = ATA_DEV_NONE;
	ap->flags |= ATA_FLAG_PORT_DISABLED;
}

static struct {
	unsigned int shift;
	u8 base;
} xfer_mode_classes[] = {
	{ ATA_SHIFT_UDMA, XFER_UDMA_0 },
	{ ATA_SHIFT_MWDMA, XFER_MW_DMA_0 },
	{ ATA_SHIFT_PIO, XFER_PIO_0 },
};

static inline u8 base_from_shift(unsigned int shift)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++)
		if (xfer_mode_classes[i].shift == shift)
			return xfer_mode_classes[i].base;

	return 0xff;
}
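
/*
 * Illustrative example: base_from_shift() maps a transfer-class bit
 * shift to the first SET FEATURES - XFER MODE code of that class, per
 * the table above, e.g. base_from_shift(ATA_SHIFT_PIO) == XFER_PIO_0.
 * An unknown shift yields the sentinel 0xff.
 */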

static void ata_dev_set_mode(struct ata_port *ap, struct ata_device *dev)
{
	int ofs, idx;
	u8 base;

	if (!ata_dev_present(dev) || (ap->flags & ATA_FLAG_PORT_DISABLED))
		return;

	if (dev->xfer_shift == ATA_SHIFT_PIO)
		dev->flags |= ATA_DFLAG_PIO;

	ata_dev_set_xfermode(ap, dev);

	base = base_from_shift(dev->xfer_shift);
	ofs = dev->xfer_mode - base;
	idx = ofs + dev->xfer_shift;
	WARN_ON(idx >= ARRAY_SIZE(xfer_mode_str));

	DPRINTK("idx=%d xfer_shift=%u, xfer_mode=0x%x, base=0x%x, offset=%d\n",
		idx, dev->xfer_shift, (int)dev->xfer_mode, (int)base, ofs);

	printk(KERN_INFO "ata%u: dev %u configured for %s\n",
	       ap->id, dev->devno, xfer_mode_str[idx]);
}

static int ata_host_set_pio(struct ata_port *ap)
{
	unsigned int mask;
	int x, i;
	u8 base, xfer_mode;

	mask = ata_get_mode_mask(ap, ATA_SHIFT_PIO);
	x = fgb(mask);
	if (x < 0) {
		printk(KERN_WARNING "ata%u: no PIO support\n", ap->id);
		return -1;
	}

	base = base_from_shift(ATA_SHIFT_PIO);
	xfer_mode = base + x;

	DPRINTK("base 0x%x xfer_mode 0x%x mask 0x%x x %d\n",
		(int)base, (int)xfer_mode, mask, x);

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->pio_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = ATA_SHIFT_PIO;
			if (ap->ops->set_piomode)
				ap->ops->set_piomode(ap, dev);
		}
	}

	return 0;
}

static void ata_host_set_dma(struct ata_port *ap, u8 xfer_mode,
			     unsigned int xfer_shift)
{
	int i;

	for (i = 0; i < ATA_MAX_DEVICES; i++) {
		struct ata_device *dev = &ap->device[i];
		if (ata_dev_present(dev)) {
			dev->dma_mode = xfer_mode;
			dev->xfer_mode = xfer_mode;
			dev->xfer_shift = xfer_shift;
			if (ap->ops->set_dmamode)
				ap->ops->set_dmamode(ap, dev);
		}
	}
}

/**
 * ata_set_mode - Program timings and issue SET FEATURES - XFER
 * @ap: port on which timings will be programmed
 *
 * Set ATA device disk transfer mode (PIO3, UDMA6, etc.).
 *
 * LOCKING: None.  Serialized during ata_bus_probe().
 *
 */
static void ata_set_mode(struct ata_port *ap)
{
	unsigned int i, xfer_shift;
	u8 xfer_mode;
	int rc;

	/* step 1: always set host PIO timings */
	rc = ata_host_set_pio(ap);
	if (rc)
		goto err_out;

	/* step 2: choose the best data xfer mode */
	xfer_mode = xfer_shift = 0;
	rc = ata_choose_xfer_mode(ap, &xfer_mode, &xfer_shift);
	if (rc)
		goto err_out;

	/* step 3: if that xfer mode isn't PIO, set host DMA timings */
	if (xfer_shift != ATA_SHIFT_PIO)
		ata_host_set_dma(ap, xfer_mode, xfer_shift);

	/* step 4: update devices' xfer mode */
	ata_dev_set_mode(ap, &ap->device[0]);
	ata_dev_set_mode(ap, &ap->device[1]);

	if (ap->flags & ATA_FLAG_PORT_DISABLED)
		return;

	if (ap->ops->post_set_mode)
		ap->ops->post_set_mode(ap);

	for (i = 0; i < 2; i++) {
		struct ata_device *dev = &ap->device[i];
		ata_dev_set_protocol(dev);
	}

	return;

err_out:
	ata_port_disable(ap);
}

/**
 * ata_busy_sleep - sleep until BSY clears, or timeout
 * @ap: port containing status register to be polled
 * @tmout_pat: impatience timeout
 * @tmout: overall timeout
 *
 * Sleep until ATA Status register bit BSY clears,
 * or a timeout occurs.
 *
 * LOCKING: None.
 *
 */

static unsigned int ata_busy_sleep(struct ata_port *ap,
				   unsigned long tmout_pat,
				   unsigned long tmout)
{
	unsigned long timer_start, timeout;
	u8 status;

	status = ata_busy_wait(ap, ATA_BUSY, 300);
	timer_start = jiffies;
	timeout = timer_start + tmout_pat;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_busy_wait(ap, ATA_BUSY, 3);
	}

	if (status & ATA_BUSY)
		printk(KERN_WARNING "ata%u is slow to respond, "
		       "please be patient\n", ap->id);

	timeout = timer_start + tmout;
	while ((status & ATA_BUSY) && (time_before(jiffies, timeout))) {
		msleep(50);
		status = ata_chk_status(ap);
	}

	if (status & ATA_BUSY) {
		printk(KERN_ERR "ata%u failed to respond (%lu secs)\n",
		       ap->id, tmout / HZ);
		return 1;
	}

	return 0;
}
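
/*
 * Typical usage, as seen elsewhere in this file: callers pass a short
 * "impatience" timeout after which the "slow to respond" warning is
 * printed, plus a longer hard deadline, e.g.
 *
 *	ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
 *
 * A non-zero return means BSY never cleared within @tmout.
 */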

static void ata_bus_post_reset(struct ata_port *ap, unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int dev0 = devmask & (1 << 0);
	unsigned int dev1 = devmask & (1 << 1);
	unsigned long timeout;

	/* if device 0 was found in ata_devchk, wait for its
	 * BSY bit to clear
	 */
	if (dev0)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* if device 1 was found in ata_devchk, wait for
	 * register access, then wait for BSY to clear
	 */
	timeout = jiffies + ATA_TMOUT_BOOT;
	while (dev1) {
		u8 nsect, lbal;

		ap->ops->dev_select(ap, 1);
		if (ap->flags & ATA_FLAG_MMIO) {
			nsect = readb((void __iomem *) ioaddr->nsect_addr);
			lbal = readb((void __iomem *) ioaddr->lbal_addr);
		} else {
			nsect = inb(ioaddr->nsect_addr);
			lbal = inb(ioaddr->lbal_addr);
		}
		if ((nsect == 1) && (lbal == 1))
			break;
		if (time_after(jiffies, timeout)) {
			dev1 = 0;
			break;
		}
		msleep(50);	/* give drive a breather */
	}
	if (dev1)
		ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);

	/* is all this really necessary? */
	ap->ops->dev_select(ap, 0);
	if (dev1)
		ap->ops->dev_select(ap, 1);
	if (dev0)
		ap->ops->dev_select(ap, 0);
}

/**
 * ata_bus_edd - Reset ATA bus via EXECUTE DEVICE DIAGNOSTIC
 * @ap: port to reset
 *
 * Issue the EXECUTE DEVICE DIAGNOSTIC command (with interrupts
 * taken to a known, disabled state) as a bus reset, then wait
 * for the bus to become idle.
 *
 * LOCKING: None.  Serialized during ata_bus_probe().
 *
 */

static unsigned int ata_bus_edd(struct ata_port *ap)
{
	struct ata_taskfile tf;

	/* set up execute-device-diag (bus reset) taskfile */
	/* also, take interrupts to a known state (disabled) */
	DPRINTK("execute-device-diag\n");
	ata_tf_init(ap, &tf, 0);
	tf.ctl |= ATA_NIEN;
	tf.command = ATA_CMD_EDD;
	tf.protocol = ATA_PROT_NODATA;

	/* do bus reset */
	ata_tf_to_host(ap, &tf);

	/* spec says at least 2ms.  but who knows with those
	 * crazy ATAPI devices...
	 */
	msleep(150);

	return ata_busy_sleep(ap, ATA_TMOUT_BOOT_QUICK, ATA_TMOUT_BOOT);
}

static unsigned int ata_bus_softreset(struct ata_port *ap,
				      unsigned int devmask)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;

	DPRINTK("ata%u: bus reset via SRST\n", ap->id);

	/* software reset.  causes dev0 to be selected */
	if (ap->flags & ATA_FLAG_MMIO) {
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl | ATA_SRST, (void __iomem *) ioaddr->ctl_addr);
		udelay(20);	/* FIXME: flush */
		writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
	} else {
		outb(ap->ctl, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl | ATA_SRST, ioaddr->ctl_addr);
		udelay(10);
		outb(ap->ctl, ioaddr->ctl_addr);
	}

	/* spec mandates ">= 2ms" before checking status.
	 * We wait 150ms, because that was the magic delay used for
	 * ATAPI devices in Hale Landis's ATADRVR, for the period of time
	 * between when the ATA command register is written, and then
	 * status is checked.  Because waiting for "a while" before
	 * checking status is fine, post SRST, we perform this magic
	 * delay here as well.
	 */
	msleep(150);

	ata_bus_post_reset(ap, devmask);

	return 0;
}

/**
 * ata_bus_reset - reset host port and associated ATA channel
 * @ap: port to reset
 *
 * This is typically the first time we actually start issuing
 * commands to the ATA channel.  We wait for BSY to clear, then
 * issue EXECUTE DEVICE DIAGNOSTIC command, polling for its
 * result.  Determine what devices, if any, are on the channel
 * by looking at the device 0/1 error register.  Look at the signature
 * stored in each device's taskfile registers, to determine if
 * the device is ATA or ATAPI.
 *
 * LOCKING:
 * Inherited from caller.  Some functions called by this function
 * obtain the host_set lock.
 *
 * SIDE EFFECTS:
 * Sets ATA_FLAG_PORT_DISABLED if bus reset fails.
 */

void ata_bus_reset(struct ata_port *ap)
{
	struct ata_ioports *ioaddr = &ap->ioaddr;
	unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
	u8 err;
	unsigned int dev0, dev1 = 0, rc = 0, devmask = 0;

	DPRINTK("ENTER, host %u, port %u\n", ap->id, ap->port_no);

	/* determine if device 0/1 are present */
	if (ap->flags & ATA_FLAG_SATA_RESET)
		dev0 = 1;
	else {
		dev0 = ata_devchk(ap, 0);
		if (slave_possible)
			dev1 = ata_devchk(ap, 1);
	}

	if (dev0)
		devmask |= (1 << 0);
	if (dev1)
		devmask |= (1 << 1);

	/* select device 0 again */
	ap->ops->dev_select(ap, 0);

	/* issue bus reset */
	if (ap->flags & ATA_FLAG_SRST)
		rc = ata_bus_softreset(ap, devmask);
	else if ((ap->flags & ATA_FLAG_SATA_RESET) == 0) {
		/* set up device control */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
		rc = ata_bus_edd(ap);
	}

	if (rc)
		goto err_out;

	/*
	 * determine by signature whether we have ATA or ATAPI devices
	 */
	err = ata_dev_try_classify(ap, 0);
	if ((slave_possible) && (err != 0x81))
		ata_dev_try_classify(ap, 1);

	/* re-enable interrupts */
	if (ap->ioaddr.ctl_addr)	/* FIXME: hack. create a hook instead */
		ata_irq_on(ap);

	/* is double-select really necessary? */
	if (ap->device[1].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 1);
	if (ap->device[0].class != ATA_DEV_NONE)
		ap->ops->dev_select(ap, 0);

	/* if no devices were detected, disable this port */
	if ((ap->device[0].class == ATA_DEV_NONE) &&
	    (ap->device[1].class == ATA_DEV_NONE))
		goto err_out;

	if (ap->flags & (ATA_FLAG_SATA_RESET | ATA_FLAG_SRST)) {
		/* set up device control for ATA_FLAG_SATA_RESET */
		if (ap->flags & ATA_FLAG_MMIO)
			writeb(ap->ctl, (void __iomem *) ioaddr->ctl_addr);
		else
			outb(ap->ctl, ioaddr->ctl_addr);
	}

	DPRINTK("EXIT\n");
	return;

err_out:
	printk(KERN_ERR "ata%u: disabling port\n", ap->id);
	ap->ops->port_disable(ap);

	DPRINTK("EXIT\n");
}

static void ata_pr_blacklisted(struct ata_port *ap, struct ata_device *dev)
{
	printk(KERN_WARNING "ata%u: dev %u is on DMA blacklist, disabling DMA\n",
	       ap->id, dev->devno);
}

static const char * ata_dma_blacklist[] = {
	"WDC AC11000H",
	"WDC AC22100H",
	"WDC AC32500H",
	"WDC AC33100H",
	"WDC AC31600H",
	"WDC AC32100H",
	"WDC AC23200L",
	"Compaq CRD-8241B",
	"CRD-8400B",
	"CRD-8480B",
	"CRD-8482B",
	"CRD-84",
	"SanDisk SDP3B",
	"SanDisk SDP3B-64",
	"SANYO CD-ROM CRD",
	"HITACHI CDR-8",
	"HITACHI CDR-8335",
	"HITACHI CDR-8435",
	"Toshiba CD-ROM XM-6202B",
	"CD-532E-A",
	"E-IDE CD-ROM CR-840",
	"CD-ROM Drive/F5A",
	"WPI CDD-820",
	"SAMSUNG CD-ROM SC-148C",
	"SAMSUNG CD-ROM SC",
	"SanDisk SDP3B-64",
	"SAMSUNG CD-ROM SN-124",
	"ATAPI CD-ROM DRIVE 40X MAXIMUM",
	"_NEC DV5800A",
};

static int ata_dma_blacklisted(struct ata_port *ap, struct ata_device *dev)
{
	unsigned char model_num[40];
	char *s;
	unsigned int len;
	int i;

	ata_dev_id_string(dev->id, model_num, ATA_ID_PROD_OFS,
			  sizeof(model_num));
	s = &model_num[0];
	len = strnlen(s, sizeof(model_num));

	/* ATAPI specifies that empty space is blank-filled; remove blanks */
	while ((len > 0) && (s[len - 1] == ' ')) {
		len--;
		s[len] = 0;
	}

	for (i = 0; i < ARRAY_SIZE(ata_dma_blacklist); i++)
		if (!strncmp(ata_dma_blacklist[i], s, len))
			return 1;

	return 0;
}

static unsigned int ata_get_mode_mask(struct ata_port *ap, int shift)
{
	struct ata_device *master, *slave;
	unsigned int mask;

	master = &ap->device[0];
	slave = &ap->device[1];

	assert(ata_dev_present(master) || ata_dev_present(slave));

	if (shift == ATA_SHIFT_UDMA) {
		mask = ap->udma_mask;
		if (ata_dev_present(master)) {
			mask &= (master->id[ATA_ID_UDMA_MODES] & 0xff);
			if (ata_dma_blacklisted(ap, master)) {
				mask = 0;
				ata_pr_blacklisted(ap, master);
			}
		}
		if (ata_dev_present(slave)) {
			mask &= (slave->id[ATA_ID_UDMA_MODES] & 0xff);
			if (ata_dma_blacklisted(ap, slave)) {
				mask = 0;
				ata_pr_blacklisted(ap, slave);
			}
		}
	}
	else if (shift == ATA_SHIFT_MWDMA) {
		mask = ap->mwdma_mask;
		if (ata_dev_present(master)) {
			mask &= (master->id[ATA_ID_MWDMA_MODES] & 0x07);
			if (ata_dma_blacklisted(ap, master)) {
				mask = 0;
				ata_pr_blacklisted(ap, master);
			}
		}
		if (ata_dev_present(slave)) {
			mask &= (slave->id[ATA_ID_MWDMA_MODES] & 0x07);
			if (ata_dma_blacklisted(ap, slave)) {
				mask = 0;
				ata_pr_blacklisted(ap, slave);
			}
		}
	}
	else if (shift == ATA_SHIFT_PIO) {
		mask = ap->pio_mask;
		if (ata_dev_present(master)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = master->id[ATA_ID_PIO_MODES] & 0x03;
			tmp_mode <<= 3;
			tmp_mode |= 0x7;
			mask &= tmp_mode;
		}
		if (ata_dev_present(slave)) {
			/* spec doesn't return explicit support for
			 * PIO0-2, so we fake it
			 */
			u16 tmp_mode = slave->id[ATA_ID_PIO_MODES] & 0x03;
			tmp_mode <<= 3;
			tmp_mode |= 0x7;
			mask &= tmp_mode;
		}
	}
	else {
		mask = 0xffffffff; /* shut up compiler warning */
		BUG();
	}

	return mask;
}
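
/*
 * Illustrative example of the PIO "fake it" math above: IDENTIFY word
 * ATA_ID_PIO_MODES advertises only PIO3/PIO4 in its low two bits, so
 * for a drive supporting both, tmp_mode = (0x03 << 3) | 0x7 = 0x1f,
 * i.e. a mask covering PIO0 through PIO4, which is then intersected
 * with the controller's ap->pio_mask.
 */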

/* find greatest bit */
static int fgb(u32 bitmap)
{
	unsigned int i;
	int x = -1;

	for (i = 0; i < 32; i++)
		if (bitmap & (1 << i))
			x = i;

	return x;
}
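
/*
 * For example, fgb(0x0b) == 3 (bits 0, 1 and 3 set; 3 is the greatest),
 * and fgb(0) == -1, which callers treat as "no supported mode".
 */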

/**
 * ata_choose_xfer_mode - attempt to find best transfer mode
 * @ap: Port for which an xfer mode will be selected
 * @xfer_mode_out: (output) SET FEATURES - XFER MODE code
 * @xfer_shift_out: (output) bit shift that selects this mode
 *
 * LOCKING:
 *
 * RETURNS:
 * Zero on success, negative on error.
 */

static int ata_choose_xfer_mode(struct ata_port *ap,
				u8 *xfer_mode_out,
				unsigned int *xfer_shift_out)
{
	unsigned int mask, shift;
	int x, i;

	for (i = 0; i < ARRAY_SIZE(xfer_mode_classes); i++) {
		shift = xfer_mode_classes[i].shift;
		mask = ata_get_mode_mask(ap, shift);

		x = fgb(mask);
		if (x >= 0) {
			*xfer_mode_out = xfer_mode_classes[i].base + x;
			*xfer_shift_out = shift;
			return 0;
		}
	}

	return -1;
}

/**
 * ata_dev_set_xfermode - Issue SET FEATURES - XFER MODE command
 * @ap: Port associated with device @dev
 * @dev: Device to which command will be sent
 *
 * Issue SET FEATURES - XFER MODE command to device @dev
 * on port @ap.
 *
 * LOCKING: None.  Serialized during ata_bus_probe().
 */

static void ata_dev_set_xfermode(struct ata_port *ap, struct ata_device *dev)
{
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	int rc;
	unsigned long flags;

	/* set up set-features taskfile */
	DPRINTK("set features - xfer mode\n");

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf.command = ATA_CMD_SET_FEATURES;
	qc->tf.feature = SETFEATURES_XFER;
	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->tf.nsect = dev->xfer_mode;

	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		ata_port_disable(ap);
	else
		wait_for_completion(&wait);

	DPRINTK("EXIT\n");
}

/**
 * ata_sg_clean - Unmap DMA memory associated with command
 * @qc: Command containing DMA memory to be released
 *
 * Unmap all mapped DMA memory associated with this command.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_sg_clean(struct ata_queued_cmd *qc)
{
	struct ata_port *ap = qc->ap;
	struct scatterlist *sg = qc->sg;
	int dir = qc->dma_dir;

	assert(qc->flags & ATA_QCFLAG_DMAMAP);
	assert(sg != NULL);

	if (qc->flags & ATA_QCFLAG_SINGLE)
		assert(qc->n_elem == 1);

	DPRINTK("unmapping %u sg elements\n", qc->n_elem);

	if (qc->flags & ATA_QCFLAG_SG)
		dma_unmap_sg(ap->host_set->dev, sg, qc->n_elem, dir);
	else
		dma_unmap_single(ap->host_set->dev, sg_dma_address(&sg[0]),
				 sg_dma_len(&sg[0]), dir);

	qc->flags &= ~ATA_QCFLAG_DMAMAP;
	qc->sg = NULL;
}

/**
 * ata_fill_sg - Fill PCI IDE PRD table
 * @qc: Metadata associated with taskfile to be transferred
 *
 * Fill PCI IDE PRD (scatter-gather) table with segments
 * associated with the current disk command.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 *
 */
static void ata_fill_sg(struct ata_queued_cmd *qc)
{
	struct scatterlist *sg = qc->sg;
	struct ata_port *ap = qc->ap;
	unsigned int idx, nelem;

	assert(sg != NULL);
	assert(qc->n_elem > 0);

	idx = 0;
	for (nelem = qc->n_elem; nelem; nelem--, sg++) {
		u32 addr, offset;
		u32 sg_len, len;

		/* determine if physical DMA addr spans 64K boundary.
		 * Note h/w doesn't support 64-bit, so we unconditionally
		 * truncate dma_addr_t to u32.
		 */
		addr = (u32) sg_dma_address(sg);
		sg_len = sg_dma_len(sg);

		while (sg_len) {
			offset = addr & 0xffff;
			len = sg_len;
			if ((offset + sg_len) > 0x10000)
				len = 0x10000 - offset;

			ap->prd[idx].addr = cpu_to_le32(addr);
			ap->prd[idx].flags_len = cpu_to_le32(len & 0xffff);
			VPRINTK("PRD[%u] = (0x%X, 0x%X)\n", idx, addr, len);

			idx++;
			sg_len -= len;
			addr += len;
		}
	}

	if (idx)
		ap->prd[idx - 1].flags_len |= cpu_to_le32(ATA_PRD_EOT);
}
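
/*
 * Illustrative example of the 64K-boundary split above: a segment of
 * 0x2000 bytes at DMA address 0xfc00 has offset 0xfc00 within its 64K
 * region, so it becomes two PRD entries:
 *
 *	PRD[0] = (0x0000fc00, 0x0400)	(up to the 64K boundary)
 *	PRD[1] = (0x00010000, 0x1c00)	(the remainder)
 *
 * and the last entry written gets the ATA_PRD_EOT flag.
 */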
2052/**
2053 * ata_check_atapi_dma - Check whether ATAPI DMA can be supported
2054 * @qc: Metadata associated with taskfile to check
2055 *
780a87f7
JG
2056 * Allow low-level driver to filter ATA PACKET commands, returning
2057 * a status indicating whether or not it is OK to use DMA for the
2058 * supplied PACKET command.
2059 *
1da177e4
LT
2060 * LOCKING:
2061 * RETURNS: 0 when ATAPI DMA can be used
2062 * nonzero otherwise
2063 */
2064int ata_check_atapi_dma(struct ata_queued_cmd *qc)
2065{
2066 struct ata_port *ap = qc->ap;
2067 int rc = 0; /* Assume ATAPI DMA is OK by default */
2068
2069 if (ap->ops->check_atapi_dma)
2070 rc = ap->ops->check_atapi_dma(qc);
2071
2072 return rc;
2073}
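
/*
 * Example (sketch, not part of libata): a low-level driver that cannot
 * DMA ATAPI commands with odd-length CDBs might hook check_atapi_dma in
 * its ata_port_operations as below; the name and the restriction are
 * hypothetical.
 */
#if 0 /* illustrative only */
static int example_check_atapi_dma(struct ata_queued_cmd *qc)
{
	/* nonzero tells libata to fall back to PIO for this command */
	return (qc->ap->cdb_len & 1) ? 1 : 0;
}
#endif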
2074/**
2075 * ata_qc_prep - Prepare taskfile for submission
2076 * @qc: Metadata associated with taskfile to be prepared
2077 *
2078 * Prepare ATA taskfile for submission.
2079 *
2080 * LOCKING:
2081 * spin_lock_irqsave(host_set lock)
2082 */
2083void ata_qc_prep(struct ata_queued_cmd *qc)
2084{
2085 if (!(qc->flags & ATA_QCFLAG_DMAMAP))
2086 return;
2087
2088 ata_fill_sg(qc);
2089}
2090
2091void ata_sg_init_one(struct ata_queued_cmd *qc, void *buf, unsigned int buflen)
2092{
2093 struct scatterlist *sg;
2094
2095 qc->flags |= ATA_QCFLAG_SINGLE;
2096
2097 memset(&qc->sgent, 0, sizeof(qc->sgent));
2098 qc->sg = &qc->sgent;
2099 qc->n_elem = 1;
2100 qc->buf_virt = buf;
2101
2102 sg = qc->sg;
2103 sg->page = virt_to_page(buf);
2104 sg->offset = (unsigned long) buf & ~PAGE_MASK;
 2105 sg->length = buflen;
2106}
2107
2108void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
2109 unsigned int n_elem)
2110{
2111 qc->flags |= ATA_QCFLAG_SG;
2112 qc->sg = sg;
2113 qc->n_elem = n_elem;
2114}
2115
2116/**
2117 * ata_sg_setup_one - DMA-map the memory buffer associated with a command
2118 * @qc: Command with a single memory buffer to be mapped
2119 *
2120 * LOCKING:
2121 * spin_lock_irqsave(host_set lock)
2122 *
2123 * RETURNS:
2124 * Zero on success, negative on error.
2125 */
2126
2127static int ata_sg_setup_one(struct ata_queued_cmd *qc)
2128{
2129 struct ata_port *ap = qc->ap;
2130 int dir = qc->dma_dir;
2131 struct scatterlist *sg = qc->sg;
2132 dma_addr_t dma_address;
2133
2134 dma_address = dma_map_single(ap->host_set->dev, qc->buf_virt,
 2135 sg->length, dir);
2136 if (dma_mapping_error(dma_address))
2137 return -1;
2138
2139 sg_dma_address(sg) = dma_address;
 2140 sg_dma_len(sg) = sg->length;
2141
2142 DPRINTK("mapped buffer of %d bytes for %s\n", sg_dma_len(sg),
2143 qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2144
2145 return 0;
2146}
2147
2148/**
2149 * ata_sg_setup - DMA-map the scatter-gather table associated with a command
2150 * @qc: Command with scatter-gather table to be mapped
2151 *
2152 * LOCKING:
2153 * spin_lock_irqsave(host_set lock)
2154 *
2155 * RETURNS:
2156 * Zero on success, negative on error.
2157 */
2158
2159static int ata_sg_setup(struct ata_queued_cmd *qc)
2160{
2161 struct ata_port *ap = qc->ap;
2162 struct scatterlist *sg = qc->sg;
2163 int n_elem, dir;
2164
2165 VPRINTK("ENTER, ata%u\n", ap->id);
2166 assert(qc->flags & ATA_QCFLAG_SG);
2167
2168 dir = qc->dma_dir;
2169 n_elem = dma_map_sg(ap->host_set->dev, sg, qc->n_elem, dir);
2170 if (n_elem < 1)
2171 return -1;
2172
2173 DPRINTK("%d sg elements mapped\n", n_elem);
2174
2175 qc->n_elem = n_elem;
2176
2177 return 0;
2178}
2179
2180/**
2181 * ata_pio_poll - poll using PIO, depending on current state
2182 * @ap: the target ata_port
2183 *
2184 * LOCKING: None.  (executing in kernel thread context)
2185 *
2186 * RETURNS:
2187 * timeout value to use, in jiffies (zero to re-poll immediately)
2188 */
2189
2190static unsigned long ata_pio_poll(struct ata_port *ap)
2191{
2192 u8 status;
2193 unsigned int poll_state = PIO_ST_UNKNOWN;
2194 unsigned int reg_state = PIO_ST_UNKNOWN;
2195 const unsigned int tmout_state = PIO_ST_TMOUT;
2196
2197 switch (ap->pio_task_state) {
2198 case PIO_ST:
2199 case PIO_ST_POLL:
2200 poll_state = PIO_ST_POLL;
2201 reg_state = PIO_ST;
2202 break;
2203 case PIO_ST_LAST:
2204 case PIO_ST_LAST_POLL:
2205 poll_state = PIO_ST_LAST_POLL;
2206 reg_state = PIO_ST_LAST;
2207 break;
2208 default:
2209 BUG();
2210 break;
2211 }
2212
2213 status = ata_chk_status(ap);
2214 if (status & ATA_BUSY) {
2215 if (time_after(jiffies, ap->pio_task_timeout)) {
2216 ap->pio_task_state = tmout_state;
2217 return 0;
2218 }
2219 ap->pio_task_state = poll_state;
2220 return ATA_SHORT_PAUSE;
2221 }
2222
2223 ap->pio_task_state = reg_state;
2224 return 0;
2225}
2226
2227/**
2228 * ata_pio_complete - finish a PIO command once the device goes idle
2229 * @ap: port on which the current PIO command is completing
2230 *
2231 * LOCKING: None.  (executing in kernel thread context)
2232 */
2233
2234static void ata_pio_complete (struct ata_port *ap)
2235{
2236 struct ata_queued_cmd *qc;
2237 u8 drv_stat;
2238
2239 /*
 2240 * This is purely heuristic. This is a fast path.
2241 * Sometimes when we enter, BSY will be cleared in
2242 * a chk-status or two. If not, the drive is probably seeking
2243 * or something. Snooze for a couple msecs, then
2244 * chk-status again. If still busy, fall back to
2245 * PIO_ST_POLL state.
2246 */
2247 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2248 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2249 msleep(2);
2250 drv_stat = ata_busy_wait(ap, ATA_BUSY | ATA_DRQ, 10);
2251 if (drv_stat & (ATA_BUSY | ATA_DRQ)) {
2252 ap->pio_task_state = PIO_ST_LAST_POLL;
2253 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2254 return;
2255 }
2256 }
2257
2258 drv_stat = ata_wait_idle(ap);
2259 if (!ata_ok(drv_stat)) {
2260 ap->pio_task_state = PIO_ST_ERR;
2261 return;
2262 }
2263
2264 qc = ata_qc_from_tag(ap, ap->active_tag);
2265 assert(qc != NULL);
2266
2267 ap->pio_task_state = PIO_ST_IDLE;
2268
2269 ata_irq_on(ap);
2270
2271 ata_qc_complete(qc, drv_stat);
2272}
2273
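/**
 * swap_buf_le16 - swap halves of 16-bit words in place, LE to CPU order
 * @buf: buffer to convert
 * @buf_words: number of 16-bit words in @buf
 *
 * Device data such as IDENTIFY pages arrives little-endian; on
 * big-endian machines each word must be swapped in place, while on
 * little-endian machines this compiles to a no-op.
 */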
2274void swap_buf_le16(u16 *buf, unsigned int buf_words)
2275{
2276#ifdef __BIG_ENDIAN
2277 unsigned int i;
2278
2279 for (i = 0; i < buf_words; i++)
2280 buf[i] = le16_to_cpu(buf[i]);
2281#endif /* __BIG_ENDIAN */
2282}
2283
2284static void ata_mmio_data_xfer(struct ata_port *ap, unsigned char *buf,
2285 unsigned int buflen, int write_data)
2286{
2287 unsigned int i;
2288 unsigned int words = buflen >> 1;
2289 u16 *buf16 = (u16 *) buf;
2290 void __iomem *mmio = (void __iomem *)ap->ioaddr.data_addr;
2291
2292 if (write_data) {
2293 for (i = 0; i < words; i++)
2294 writew(le16_to_cpu(buf16[i]), mmio);
2295 } else {
2296 for (i = 0; i < words; i++)
2297 buf16[i] = cpu_to_le16(readw(mmio));
2298 }
2299}
2300
2301static void ata_pio_data_xfer(struct ata_port *ap, unsigned char *buf,
2302 unsigned int buflen, int write_data)
2303{
 2304 unsigned int words = buflen >> 1; /* 16-bit words, not dwords */
 2305
 2306 if (write_data)
 2307 outsw(ap->ioaddr.data_addr, buf, words);
 2308 else
 2309 insw(ap->ioaddr.data_addr, buf, words);
2310}
2311
2312static void ata_data_xfer(struct ata_port *ap, unsigned char *buf,
2313 unsigned int buflen, int do_write)
2314{
2315 if (ap->flags & ATA_FLAG_MMIO)
2316 ata_mmio_data_xfer(ap, buf, buflen, do_write);
2317 else
2318 ata_pio_data_xfer(ap, buf, buflen, do_write);
2319}
2320
2321static void ata_pio_sector(struct ata_queued_cmd *qc)
2322{
2323 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2324 struct scatterlist *sg = qc->sg;
2325 struct ata_port *ap = qc->ap;
2326 struct page *page;
2327 unsigned int offset;
2328 unsigned char *buf;
2329
2330 if (qc->cursect == (qc->nsect - 1))
2331 ap->pio_task_state = PIO_ST_LAST;
2332
2333 page = sg[qc->cursg].page;
2334 offset = sg[qc->cursg].offset + qc->cursg_ofs * ATA_SECT_SIZE;
2335
2336 /* get the current page and offset */
2337 page = nth_page(page, (offset >> PAGE_SHIFT));
2338 offset %= PAGE_SIZE;
2339
2340 buf = kmap(page) + offset;
2341
2342 qc->cursect++;
2343 qc->cursg_ofs++;
2344
 2345 if ((qc->cursg_ofs * ATA_SECT_SIZE) == (&sg[qc->cursg])->length) {
2346 qc->cursg++;
2347 qc->cursg_ofs = 0;
2348 }
2349
2350 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2351
2352 /* do the actual data transfer */
2353 do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2354 ata_data_xfer(ap, buf, ATA_SECT_SIZE, do_write);
2355
2356 kunmap(page);
2357}
2358
2359static void __atapi_pio_bytes(struct ata_queued_cmd *qc, unsigned int bytes)
2360{
2361 int do_write = (qc->tf.flags & ATA_TFLAG_WRITE);
2362 struct scatterlist *sg = qc->sg;
2363 struct ata_port *ap = qc->ap;
2364 struct page *page;
2365 unsigned char *buf;
2366 unsigned int offset, count;
2367
2368 if (qc->curbytes == qc->nbytes - bytes)
2369 ap->pio_task_state = PIO_ST_LAST;
2370
2371next_sg:
2372 sg = &qc->sg[qc->cursg];
2373
2374next_page:
2375 page = sg->page;
2376 offset = sg->offset + qc->cursg_ofs;
2377
2378 /* get the current page and offset */
2379 page = nth_page(page, (offset >> PAGE_SHIFT));
2380 offset %= PAGE_SIZE;
2381
 2382 count = min(sg->length - qc->cursg_ofs, bytes);
2383
2384 /* don't cross page boundaries */
2385 count = min(count, (unsigned int)PAGE_SIZE - offset);
2386
2387 buf = kmap(page) + offset;
2388
2389 bytes -= count;
2390 qc->curbytes += count;
2391 qc->cursg_ofs += count;
2392
 2393 if (qc->cursg_ofs == sg->length) {
2394 qc->cursg++;
2395 qc->cursg_ofs = 0;
2396 }
2397
2398 DPRINTK("data %s\n", qc->tf.flags & ATA_TFLAG_WRITE ? "write" : "read");
2399
2400 /* do the actual data transfer */
2401 ata_data_xfer(ap, buf, count, do_write);
2402
2403 kunmap(page);
2404
2405 if (bytes) {
 2406 if (qc->cursg_ofs < sg->length)
2407 goto next_page;
2408 goto next_sg;
2409 }
2410}
2411
2412static void atapi_pio_bytes(struct ata_queued_cmd *qc)
2413{
2414 struct ata_port *ap = qc->ap;
2415 struct ata_device *dev = qc->dev;
2416 unsigned int ireason, bc_lo, bc_hi, bytes;
2417 int i_write, do_write = (qc->tf.flags & ATA_TFLAG_WRITE) ? 1 : 0;
2418
2419 ap->ops->tf_read(ap, &qc->tf);
2420 ireason = qc->tf.nsect;
2421 bc_lo = qc->tf.lbam;
2422 bc_hi = qc->tf.lbah;
2423 bytes = (bc_hi << 8) | bc_lo;
2424
2425 /* shall be cleared to zero, indicating xfer of data */
2426 if (ireason & (1 << 0))
2427 goto err_out;
2428
2429 /* make sure transfer direction matches expected */
2430 i_write = ((ireason & (1 << 1)) == 0) ? 1 : 0;
2431 if (do_write != i_write)
2432 goto err_out;
2433
2434 __atapi_pio_bytes(qc, bytes);
2435
2436 return;
2437
2438err_out:
2439 printk(KERN_INFO "ata%u: dev %u: ATAPI check failed\n",
2440 ap->id, dev->devno);
2441 ap->pio_task_state = PIO_ST_ERR;
2442}
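
/*
 * Note on the ireason checks above: in the ATAPI interrupt reason
 * register (returned in the taskfile nsect field), bit 0 (CoD) set
 * means the device expects command bytes rather than data, and bit 1
 * (IO) set means the transfer runs device-to-host.  So for a
 * host-to-device data phase the device must report CoD == 0, IO == 0.
 */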
2443
2444/**
2445 * ata_pio_block - transfer one sector or ATAPI chunk via PIO
2446 * @ap: port on which the PIO data phase is in progress
2447 *
2448 * LOCKING: None.  (executing in kernel thread context)
2449 */
2450
2451static void ata_pio_block(struct ata_port *ap)
2452{
2453 struct ata_queued_cmd *qc;
2454 u8 status;
2455
2456 /*
 2457 * This is purely heuristic. This is a fast path.
2458 * Sometimes when we enter, BSY will be cleared in
2459 * a chk-status or two. If not, the drive is probably seeking
2460 * or something. Snooze for a couple msecs, then
2461 * chk-status again. If still busy, fall back to
2462 * PIO_ST_POLL state.
2463 */
2464 status = ata_busy_wait(ap, ATA_BUSY, 5);
2465 if (status & ATA_BUSY) {
2466 msleep(2);
2467 status = ata_busy_wait(ap, ATA_BUSY, 10);
2468 if (status & ATA_BUSY) {
2469 ap->pio_task_state = PIO_ST_POLL;
2470 ap->pio_task_timeout = jiffies + ATA_TMOUT_PIO;
2471 return;
2472 }
2473 }
2474
2475 qc = ata_qc_from_tag(ap, ap->active_tag);
2476 assert(qc != NULL);
2477
2478 if (is_atapi_taskfile(&qc->tf)) {
2479 /* no more data to transfer or unsupported ATAPI command */
2480 if ((status & ATA_DRQ) == 0) {
2481 ap->pio_task_state = PIO_ST_IDLE;
2482
2483 ata_irq_on(ap);
2484
2485 ata_qc_complete(qc, status);
2486 return;
2487 }
2488
2489 atapi_pio_bytes(qc);
2490 } else {
2491 /* handle BSY=0, DRQ=0 as error */
2492 if ((status & ATA_DRQ) == 0) {
2493 ap->pio_task_state = PIO_ST_ERR;
2494 return;
2495 }
2496
2497 ata_pio_sector(qc);
2498 }
2499}
2500
2501static void ata_pio_error(struct ata_port *ap)
2502{
2503 struct ata_queued_cmd *qc;
2504 u8 drv_stat;
2505
2506 qc = ata_qc_from_tag(ap, ap->active_tag);
2507 assert(qc != NULL);
2508
2509 drv_stat = ata_chk_status(ap);
2510 printk(KERN_WARNING "ata%u: PIO error, drv_stat 0x%x\n",
2511 ap->id, drv_stat);
2512
2513 ap->pio_task_state = PIO_ST_IDLE;
2514
2515 ata_irq_on(ap);
2516
2517 ata_qc_complete(qc, drv_stat | ATA_ERR);
2518}
2519
2520static void ata_pio_task(void *_data)
2521{
2522 struct ata_port *ap = _data;
2523 unsigned long timeout = 0;
2524
2525 switch (ap->pio_task_state) {
2526 case PIO_ST_IDLE:
2527 return;
2528
2529 case PIO_ST:
2530 ata_pio_block(ap);
2531 break;
2532
2533 case PIO_ST_LAST:
2534 ata_pio_complete(ap);
2535 break;
2536
2537 case PIO_ST_POLL:
2538 case PIO_ST_LAST_POLL:
2539 timeout = ata_pio_poll(ap);
2540 break;
2541
2542 case PIO_ST_TMOUT:
2543 case PIO_ST_ERR:
2544 ata_pio_error(ap);
2545 return;
2546 }
2547
2548 if (timeout)
2549 queue_delayed_work(ata_wq, &ap->pio_task,
2550 timeout);
2551 else
2552 queue_work(ata_wq, &ap->pio_task);
2553}
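
/*
 * Summary of the polling state machine driven by ata_pio_task() above
 * (informational; the PIO_ST_* states come from the local libata.h):
 *
 *   PIO_ST        -> ata_pio_block() moves one sector/chunk, advancing
 *                    to PIO_ST_LAST for the final block
 *   PIO_ST_LAST   -> ata_pio_complete() waits for BSY/DRQ to clear,
 *                    then completes the command
 *   PIO_ST_POLL / PIO_ST_LAST_POLL -> device still busy; ata_pio_poll()
 *                    re-polls until BSY clears or the timeout expires
 *   PIO_ST_TMOUT / PIO_ST_ERR -> ata_pio_error() completes with ATA_ERR
 *   PIO_ST_IDLE   -> nothing queued; the task exits
 */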
2554
2555static void atapi_request_sense(struct ata_port *ap, struct ata_device *dev,
2556 struct scsi_cmnd *cmd)
2557{
2558 DECLARE_COMPLETION(wait);
2559 struct ata_queued_cmd *qc;
2560 unsigned long flags;
2561 int rc;
2562
2563 DPRINTK("ATAPI request sense\n");
2564
2565 qc = ata_qc_new_init(ap, dev);
2566 BUG_ON(qc == NULL);
2567
2568 /* FIXME: is this needed? */
2569 memset(cmd->sense_buffer, 0, sizeof(cmd->sense_buffer));
2570
2571 ata_sg_init_one(qc, cmd->sense_buffer, sizeof(cmd->sense_buffer));
2572 qc->dma_dir = DMA_FROM_DEVICE;
2573
 2574 memset(&qc->cdb, 0, ap->cdb_len);
2575 qc->cdb[0] = REQUEST_SENSE;
2576 qc->cdb[4] = SCSI_SENSE_BUFFERSIZE;
2577
2578 qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
2579 qc->tf.command = ATA_CMD_PACKET;
2580
2581 qc->tf.protocol = ATA_PROT_ATAPI;
2582 qc->tf.lbam = (8 * 1024) & 0xff;
2583 qc->tf.lbah = (8 * 1024) >> 8;
2584 qc->nbytes = SCSI_SENSE_BUFFERSIZE;
2585
2586 qc->waiting = &wait;
2587 qc->complete_fn = ata_qc_complete_noop;
2588
2589 spin_lock_irqsave(&ap->host_set->lock, flags);
2590 rc = ata_qc_issue(qc);
2591 spin_unlock_irqrestore(&ap->host_set->lock, flags);
2592
2593 if (rc)
2594 ata_port_disable(ap);
2595 else
2596 wait_for_completion(&wait);
2597
2598 DPRINTK("EXIT\n");
2599}
2600
2601/**
2602 * ata_qc_timeout - Handle timeout of queued command
2603 * @qc: Command that timed out
2604 *
2605 * Some part of the kernel (currently, only the SCSI layer)
2606 * has noticed that the active command on port @ap has not
2607 * completed after a specified length of time. Handle this
2608 * condition by disabling DMA (if necessary) and completing
2609 * transactions, with error if necessary.
2610 *
2611 * This also handles the case of the "lost interrupt", where
2612 * for some reason (possibly hardware bug, possibly driver bug)
2613 * an interrupt was not delivered to the driver, even though the
2614 * transaction completed successfully.
2615 *
 2616 * LOCKING: Inherited from SCSI layer (none, can sleep)
 2617 */
2618
2619static void ata_qc_timeout(struct ata_queued_cmd *qc)
2620{
2621 struct ata_port *ap = qc->ap;
2622 struct ata_device *dev = qc->dev;
2623 u8 host_stat = 0, drv_stat;
2624
2625 DPRINTK("ENTER\n");
2626
2627 /* FIXME: doesn't this conflict with timeout handling? */
2628 if (qc->dev->class == ATA_DEV_ATAPI && qc->scsicmd) {
2629 struct scsi_cmnd *cmd = qc->scsicmd;
2630
2631 if (!scsi_eh_eflags_chk(cmd, SCSI_EH_CANCEL_CMD)) {
2632
2633 /* finish completing original command */
2634 __ata_qc_complete(qc);
2635
2636 atapi_request_sense(ap, dev, cmd);
2637
2638 cmd->result = (CHECK_CONDITION << 1) | (DID_OK << 16);
2639 scsi_finish_command(cmd);
2640
2641 goto out;
2642 }
2643 }
2644
2645 /* hack alert! We cannot use the supplied completion
2646 * function from inside the ->eh_strategy_handler() thread.
2647 * libata is the only user of ->eh_strategy_handler() in
2648 * any kernel, so the default scsi_done() assumes it is
2649 * not being called from the SCSI EH.
2650 */
2651 qc->scsidone = scsi_finish_command;
2652
2653 switch (qc->tf.protocol) {
2654
2655 case ATA_PROT_DMA:
2656 case ATA_PROT_ATAPI_DMA:
2657 host_stat = ap->ops->bmdma_status(ap);
2658
2659 /* before we do anything else, clear DMA-Start bit */
2660 ap->ops->bmdma_stop(ap);
2661
2662 /* fall through */
2663
2664 default:
2665 ata_altstatus(ap);
2666 drv_stat = ata_chk_status(ap);
2667
2668 /* ack bmdma irq events */
2669 ap->ops->irq_clear(ap);
2670
2671 printk(KERN_ERR "ata%u: command 0x%x timeout, stat 0x%x host_stat 0x%x\n",
2672 ap->id, qc->tf.command, drv_stat, host_stat);
2673
2674 /* complete taskfile transaction */
2675 ata_qc_complete(qc, drv_stat);
2676 break;
2677 }
2678out:
2679 DPRINTK("EXIT\n");
2680}
2681
2682/**
2683 * ata_eng_timeout - Handle timeout of queued command
2684 * @ap: Port on which timed-out command is active
2685 *
2686 * Some part of the kernel (currently, only the SCSI layer)
2687 * has noticed that the active command on port @ap has not
2688 * completed after a specified length of time. Handle this
2689 * condition by disabling DMA (if necessary) and completing
2690 * transactions, with error if necessary.
2691 *
2692 * This also handles the case of the "lost interrupt", where
2693 * for some reason (possibly hardware bug, possibly driver bug)
2694 * an interrupt was not delivered to the driver, even though the
2695 * transaction completed successfully.
2696 *
2697 * LOCKING:
2698 * Inherited from SCSI layer (none, can sleep)
2699 */
2700
2701void ata_eng_timeout(struct ata_port *ap)
2702{
2703 struct ata_queued_cmd *qc;
2704
2705 DPRINTK("ENTER\n");
2706
2707 qc = ata_qc_from_tag(ap, ap->active_tag);
2708 if (!qc) {
2709 printk(KERN_ERR "ata%u: BUG: timeout without command\n",
2710 ap->id);
2711 goto out;
2712 }
2713
2714 ata_qc_timeout(qc);
2715
2716out:
2717 DPRINTK("EXIT\n");
2718}
2719
2720/**
2721 * ata_qc_new - Request an available ATA command, for queueing
2722 * @ap: Port from which a free command slot (tag) is allocated
2723 *
2724 * LOCKING:
2725 * None.
2726 */
2727
2728static struct ata_queued_cmd *ata_qc_new(struct ata_port *ap)
2729{
2730 struct ata_queued_cmd *qc = NULL;
2731 unsigned int i;
2732
2733 for (i = 0; i < ATA_MAX_QUEUE; i++)
2734 if (!test_and_set_bit(i, &ap->qactive)) {
2735 qc = ata_qc_from_tag(ap, i);
2736 break;
2737 }
2738
2739 if (qc)
2740 qc->tag = i;
2741
2742 return qc;
2743}
2744
2745/**
2746 * ata_qc_new_init - Request an available ATA command, and initialize it
2747 * @ap: Port associated with device @dev
2748 * @dev: Device from whom we request an available command structure
2749 *
 2750 * LOCKING: None.
 2751 */
2752
2753struct ata_queued_cmd *ata_qc_new_init(struct ata_port *ap,
2754 struct ata_device *dev)
2755{
2756 struct ata_queued_cmd *qc;
2757
2758 qc = ata_qc_new(ap);
2759 if (qc) {
2760 qc->sg = NULL;
2761 qc->flags = 0;
2762 qc->scsicmd = NULL;
2763 qc->ap = ap;
2764 qc->dev = dev;
2765 qc->cursect = qc->cursg = qc->cursg_ofs = 0;
2766 qc->nsect = 0;
2767 qc->nbytes = qc->curbytes = 0;
2768
2769 ata_tf_init(ap, &qc->tf, dev->devno);
2770
2771 if (dev->flags & ATA_DFLAG_LBA48)
2772 qc->tf.flags |= ATA_TFLAG_LBA48;
2773 }
2774
2775 return qc;
2776}
2777
2778static int ata_qc_complete_noop(struct ata_queued_cmd *qc, u8 drv_stat)
2779{
2780 return 0;
2781}
2782
2783static void __ata_qc_complete(struct ata_queued_cmd *qc)
2784{
2785 struct ata_port *ap = qc->ap;
2786 unsigned int tag, do_clear = 0;
2787
2788 qc->flags = 0;
2789 tag = qc->tag;
2790 if (likely(ata_tag_valid(tag))) {
2791 if (tag == ap->active_tag)
2792 ap->active_tag = ATA_TAG_POISON;
2793 qc->tag = ATA_TAG_POISON;
2794 do_clear = 1;
2795 }
2796
2797 if (qc->waiting) {
2798 struct completion *waiting = qc->waiting;
2799 qc->waiting = NULL;
2800 complete(waiting);
2801 }
2802
2803 if (likely(do_clear))
2804 clear_bit(tag, &ap->qactive);
2805}
2806
2807/**
2808 * ata_qc_free - free unused ata_queued_cmd
 2809 * @qc: Command to free
2810 *
2811 * Designed to free unused ata_queued_cmd object
2812 * in case something prevents using it.
2813 *
 2814 * LOCKING:
 2815 * spin_lock_irqsave(host_set lock)
2816 */
2817void ata_qc_free(struct ata_queued_cmd *qc)
2818{
2819 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2820 assert(qc->waiting == NULL); /* nothing should be waiting */
2821
2822 __ata_qc_complete(qc);
2823}
2824
2825/**
2826 * ata_qc_complete - Complete an active ATA command
2827 * @qc: Command to complete
2828 * @drv_stat: ATA status register contents
2829 *
 2830 * LOCKING:
 2831 * spin_lock_irqsave(host_set lock)
2832 */
2833
2834void ata_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
2835{
2836 int rc;
2837
2838 assert(qc != NULL); /* ata_qc_from_tag _might_ return NULL */
2839 assert(qc->flags & ATA_QCFLAG_ACTIVE);
2840
2841 if (likely(qc->flags & ATA_QCFLAG_DMAMAP))
2842 ata_sg_clean(qc);
2843
2844 /* call completion callback */
2845 rc = qc->complete_fn(qc, drv_stat);
 2846 qc->flags &= ~ATA_QCFLAG_ACTIVE;
2847
2848 /* if callback indicates not to complete command (non-zero),
2849 * return immediately
2850 */
2851 if (rc != 0)
2852 return;
2853
2854 __ata_qc_complete(qc);
2855
2856 VPRINTK("EXIT\n");
2857}
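
/*
 * Example (sketch, not part of libata): a completion callback of the
 * kind installed in qc->complete_fn.  Returning nonzero tells
 * ata_qc_complete() to skip __ata_qc_complete(), leaving the tag live;
 * ata_qc_complete_noop() above returns zero, so completion proceeds.
 */
#if 0 /* illustrative only */
static int example_qc_complete(struct ata_queued_cmd *qc, u8 drv_stat)
{
	if (drv_stat & ATA_ERR)
		printk(KERN_WARNING "ata%u: command failed, stat 0x%x\n",
		       qc->ap->id, drv_stat);
	return 0;	/* let __ata_qc_complete() run and free the tag */
}
#endif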
2858
2859static inline int ata_should_dma_map(struct ata_queued_cmd *qc)
2860{
2861 struct ata_port *ap = qc->ap;
2862
2863 switch (qc->tf.protocol) {
2864 case ATA_PROT_DMA:
2865 case ATA_PROT_ATAPI_DMA:
2866 return 1;
2867
2868 case ATA_PROT_ATAPI:
2869 case ATA_PROT_PIO:
2870 case ATA_PROT_PIO_MULT:
2871 if (ap->flags & ATA_FLAG_PIO_DMA)
2872 return 1;
2873
2874 /* fall through */
2875
2876 default:
2877 return 0;
2878 }
2879
2880 /* never reached */
2881}
2882
2883/**
2884 * ata_qc_issue - issue taskfile to device
2885 * @qc: command to issue to device
2886 *
2887 * Prepare an ATA command to submission to device.
2888 * This includes mapping the data into a DMA-able
2889 * area, filling in the S/G table, and finally
2890 * writing the taskfile to hardware, starting the command.
2891 *
2892 * LOCKING:
2893 * spin_lock_irqsave(host_set lock)
2894 *
2895 * RETURNS:
2896 * Zero on success, negative on error.
2897 */
2898
2899int ata_qc_issue(struct ata_queued_cmd *qc)
2900{
2901 struct ata_port *ap = qc->ap;
2902
2903 if (ata_should_dma_map(qc)) {
2904 if (qc->flags & ATA_QCFLAG_SG) {
2905 if (ata_sg_setup(qc))
2906 goto err_out;
2907 } else if (qc->flags & ATA_QCFLAG_SINGLE) {
2908 if (ata_sg_setup_one(qc))
2909 goto err_out;
2910 }
2911 } else {
2912 qc->flags &= ~ATA_QCFLAG_DMAMAP;
2913 }
2914
2915 ap->ops->qc_prep(qc);
2916
2917 qc->ap->active_tag = qc->tag;
2918 qc->flags |= ATA_QCFLAG_ACTIVE;
2919
2920 return ap->ops->qc_issue(qc);
2921
2922err_out:
2923 return -1;
2924}
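
/*
 * Example (sketch, not part of libata): issuing an internal NODATA
 * command through ata_qc_issue(), following the same synchronous
 * pattern as ata_dev_set_xfermode() above.  The raw command opcode is
 * hypothetical and shown only for illustration.
 */
#if 0 /* illustrative only */
static void example_issue_nodata(struct ata_port *ap, struct ata_device *dev)
{
	DECLARE_COMPLETION(wait);
	struct ata_queued_cmd *qc;
	unsigned long flags;
	int rc;

	qc = ata_qc_new_init(ap, dev);
	BUG_ON(qc == NULL);

	qc->tf.command = 0xE7;	/* hypothetical: FLUSH CACHE opcode */
	qc->tf.flags |= ATA_TFLAG_ISADDR | ATA_TFLAG_DEVICE;
	qc->tf.protocol = ATA_PROT_NODATA;
	qc->waiting = &wait;
	qc->complete_fn = ata_qc_complete_noop;

	spin_lock_irqsave(&ap->host_set->lock, flags);
	rc = ata_qc_issue(qc);
	spin_unlock_irqrestore(&ap->host_set->lock, flags);

	if (rc)
		ata_port_disable(ap);
	else
		wait_for_completion(&wait);
}
#endif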
2925
2926/**
2927 * ata_qc_issue_prot - issue taskfile to device in proto-dependent manner
2928 * @qc: command to issue to device
2929 *
2930 * Using various libata functions and hooks, this function
2931 * starts an ATA command. ATA commands are grouped into
2932 * classes called "protocols", and issuing each type of protocol
2933 * is slightly different.
2934 *
2935 * LOCKING:
2936 * spin_lock_irqsave(host_set lock)
2937 *
2938 * RETURNS:
2939 * Zero on success, negative on error.
2940 */
2941
2942int ata_qc_issue_prot(struct ata_queued_cmd *qc)
2943{
2944 struct ata_port *ap = qc->ap;
2945
2946 ata_dev_select(ap, qc->dev->devno, 1, 0);
2947
2948 switch (qc->tf.protocol) {
2949 case ATA_PROT_NODATA:
2950 ata_tf_to_host_nolock(ap, &qc->tf);
2951 break;
2952
2953 case ATA_PROT_DMA:
2954 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
2955 ap->ops->bmdma_setup(qc); /* set up bmdma */
2956 ap->ops->bmdma_start(qc); /* initiate bmdma */
2957 break;
2958
2959 case ATA_PROT_PIO: /* load tf registers, initiate polling pio */
2960 ata_qc_set_polling(qc);
2961 ata_tf_to_host_nolock(ap, &qc->tf);
2962 ap->pio_task_state = PIO_ST;
2963 queue_work(ata_wq, &ap->pio_task);
2964 break;
2965
2966 case ATA_PROT_ATAPI:
2967 ata_qc_set_polling(qc);
2968 ata_tf_to_host_nolock(ap, &qc->tf);
2969 queue_work(ata_wq, &ap->packet_task);
2970 break;
2971
2972 case ATA_PROT_ATAPI_NODATA:
2973 ata_tf_to_host_nolock(ap, &qc->tf);
2974 queue_work(ata_wq, &ap->packet_task);
2975 break;
2976
2977 case ATA_PROT_ATAPI_DMA:
2978 ap->ops->tf_load(ap, &qc->tf); /* load tf registers */
2979 ap->ops->bmdma_setup(qc); /* set up bmdma */
2980 queue_work(ata_wq, &ap->packet_task);
2981 break;
2982
2983 default:
2984 WARN_ON(1);
2985 return -1;
2986 }
2987
2988 return 0;
2989}
2990
2991/**
2992 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
2993 * @qc: Info associated with this ATA transaction.
2994 *
2995 * LOCKING:
2996 * spin_lock_irqsave(host_set lock)
2997 */
2998
2999static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
3000{
3001 struct ata_port *ap = qc->ap;
3002 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3003 u8 dmactl;
3004 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3005
3006 /* load PRD table addr. */
3007 mb(); /* make sure PRD table writes are visible to controller */
3008 writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);
3009
3010 /* specify data direction, triple-check start bit is clear */
3011 dmactl = readb(mmio + ATA_DMA_CMD);
3012 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3013 if (!rw)
3014 dmactl |= ATA_DMA_WR;
3015 writeb(dmactl, mmio + ATA_DMA_CMD);
3016
3017 /* issue r/w command */
3018 ap->ops->exec_command(ap, &qc->tf);
3019}
3020
3021/**
3022 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
3023 * @qc: Info associated with this ATA transaction.
3024 *
3025 * LOCKING:
3026 * spin_lock_irqsave(host_set lock)
3027 */
3028
3029static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
3030{
3031 struct ata_port *ap = qc->ap;
3032 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3033 u8 dmactl;
3034
3035 /* start host DMA transaction */
3036 dmactl = readb(mmio + ATA_DMA_CMD);
3037 writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);
3038
3039 /* Strictly, one may wish to issue a readb() here, to
3040 * flush the mmio write. However, control also passes
3041 * to the hardware at this point, and it will interrupt
3042 * us when we are to resume control. So, in effect,
3043 * we don't care when the mmio write flushes.
3044 * Further, a read of the DMA status register _immediately_
3045 * following the write may not be what certain flaky hardware
 3046 * expects, so I think it is best to not add a readb()
 3047 * without first testing all the MMIO ATA cards/mobos.
3048 * Or maybe I'm just being paranoid.
3049 */
3050}
3051
3052/**
3053 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
3054 * @qc: Info associated with this ATA transaction.
3055 *
3056 * LOCKING:
3057 * spin_lock_irqsave(host_set lock)
3058 */
3059
3060static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
3061{
3062 struct ata_port *ap = qc->ap;
3063 unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
3064 u8 dmactl;
3065
3066 /* load PRD table addr. */
3067 outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);
3068
3069 /* specify data direction, triple-check start bit is clear */
3070 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3071 dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
3072 if (!rw)
3073 dmactl |= ATA_DMA_WR;
3074 outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3075
3076 /* issue r/w command */
3077 ap->ops->exec_command(ap, &qc->tf);
3078}
3079
3080/**
3081 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
3082 * @qc: Info associated with this ATA transaction.
3083 *
3084 * LOCKING:
3085 * spin_lock_irqsave(host_set lock)
3086 */
3087
3088static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
3089{
3090 struct ata_port *ap = qc->ap;
3091 u8 dmactl;
3092
3093 /* start host DMA transaction */
3094 dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3095 outb(dmactl | ATA_DMA_START,
3096 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3097}
3098
3099void ata_bmdma_start(struct ata_queued_cmd *qc)
3100{
3101 if (qc->ap->flags & ATA_FLAG_MMIO)
3102 ata_bmdma_start_mmio(qc);
3103 else
3104 ata_bmdma_start_pio(qc);
3105}
3106
3107void ata_bmdma_setup(struct ata_queued_cmd *qc)
3108{
3109 if (qc->ap->flags & ATA_FLAG_MMIO)
3110 ata_bmdma_setup_mmio(qc);
3111 else
3112 ata_bmdma_setup_pio(qc);
3113}
3114
3115void ata_bmdma_irq_clear(struct ata_port *ap)
3116{
3117 if (ap->flags & ATA_FLAG_MMIO) {
3118 void __iomem *mmio = ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
3119 writeb(readb(mmio), mmio);
3120 } else {
3121 unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
3122 outb(inb(addr), addr);
3123 }
3124
3125}
3126
3127u8 ata_bmdma_status(struct ata_port *ap)
3128{
3129 u8 host_stat;
3130 if (ap->flags & ATA_FLAG_MMIO) {
3131 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3132 host_stat = readb(mmio + ATA_DMA_STATUS);
3133 } else
3134 host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
3135 return host_stat;
3136}
3137
3138void ata_bmdma_stop(struct ata_port *ap)
3139{
3140 if (ap->flags & ATA_FLAG_MMIO) {
3141 void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
3142
3143 /* clear start/stop bit */
3144 writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
3145 mmio + ATA_DMA_CMD);
3146 } else {
3147 /* clear start/stop bit */
3148 outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
3149 ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
3150 }
3151
3152 /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
3153 ata_altstatus(ap); /* dummy read */
3154}
3155
3156/**
3157 * ata_host_intr - Handle host interrupt for given (port, task)
3158 * @ap: Port on which interrupt arrived (possibly...)
3159 * @qc: Taskfile currently active in engine
3160 *
3161 * Handle host interrupt for given queued command. Currently,
3162 * only DMA interrupts are handled. All other commands are
3163 * handled via polling with interrupts disabled (nIEN bit).
3164 *
3165 * LOCKING:
3166 * spin_lock_irqsave(host_set lock)
3167 *
3168 * RETURNS:
3169 * One if interrupt was handled, zero if not (shared irq).
3170 */
3171
3172inline unsigned int ata_host_intr (struct ata_port *ap,
3173 struct ata_queued_cmd *qc)
3174{
3175 u8 status, host_stat;
3176
3177 switch (qc->tf.protocol) {
3178
3179 case ATA_PROT_DMA:
3180 case ATA_PROT_ATAPI_DMA:
3181 case ATA_PROT_ATAPI:
3182 /* check status of DMA engine */
3183 host_stat = ap->ops->bmdma_status(ap);
3184 VPRINTK("ata%u: host_stat 0x%X\n", ap->id, host_stat);
3185
3186 /* if it's not our irq... */
3187 if (!(host_stat & ATA_DMA_INTR))
3188 goto idle_irq;
3189
3190 /* before we do anything else, clear DMA-Start bit */
3191 ap->ops->bmdma_stop(ap);
3192
3193 /* fall through */
3194
3195 case ATA_PROT_ATAPI_NODATA:
3196 case ATA_PROT_NODATA:
3197 /* check altstatus */
3198 status = ata_altstatus(ap);
3199 if (status & ATA_BUSY)
3200 goto idle_irq;
3201
3202 /* check main status, clearing INTRQ */
3203 status = ata_chk_status(ap);
3204 if (unlikely(status & ATA_BUSY))
3205 goto idle_irq;
3206 DPRINTK("ata%u: protocol %d (dev_stat 0x%X)\n",
3207 ap->id, qc->tf.protocol, status);
3208
3209 /* ack bmdma irq events */
3210 ap->ops->irq_clear(ap);
3211
3212 /* complete taskfile transaction */
3213 ata_qc_complete(qc, status);
3214 break;
3215
3216 default:
3217 goto idle_irq;
3218 }
3219
3220 return 1; /* irq handled */
3221
3222idle_irq:
3223 ap->stats.idle_irq++;
3224
3225#ifdef ATA_IRQ_TRAP
 3226 if ((ap->stats.idle_irq % 1000) == 0) {
 3227 ata_irq_ack(ap, 0); /* debug trap */
 3228 printk(KERN_WARNING "ata%d: irq trap\n", ap->id);
 3229 return 1; /* treat the trap as handled */
 3230 }
3231#endif
3232 return 0; /* irq not handled */
3233}
3234
3235/**
3236 * ata_interrupt - Default ATA host interrupt handler
3237 * @irq: irq line
3238 * @dev_instance: pointer to our host information structure
3239 * @regs: unused
3240 *
 3241 * LOCKING:
 3242 * Obtains host_set lock during operation.
 3243 * RETURNS:
 3244 * IRQ_NONE or IRQ_HANDLED.
3245 */
3246
3247irqreturn_t ata_interrupt (int irq, void *dev_instance, struct pt_regs *regs)
3248{
3249 struct ata_host_set *host_set = dev_instance;
3250 unsigned int i;
3251 unsigned int handled = 0;
3252 unsigned long flags;
3253
3254 /* TODO: make _irqsave conditional on x86 PCI IDE legacy mode */
3255 spin_lock_irqsave(&host_set->lock, flags);
3256
3257 for (i = 0; i < host_set->n_ports; i++) {
3258 struct ata_port *ap;
3259
3260 ap = host_set->ports[i];
3261 if (ap && (!(ap->flags & ATA_FLAG_PORT_DISABLED))) {
3262 struct ata_queued_cmd *qc;
3263
3264 qc = ata_qc_from_tag(ap, ap->active_tag);
 3265 if (qc && (!(qc->tf.ctl & ATA_NIEN)) &&
 3266 (qc->flags & ATA_QCFLAG_ACTIVE))
3267 handled |= ata_host_intr(ap, qc);
3268 }
3269 }
3270
3271 spin_unlock_irqrestore(&host_set->lock, flags);
3272
3273 return IRQ_RETVAL(handled);
3274}
3275
3276/**
3277 * atapi_packet_task - Write CDB bytes to hardware
3278 * @_data: Port to which ATAPI device is attached.
3279 *
3280 * When device has indicated its readiness to accept
3281 * a CDB, this function is called. Send the CDB.
3282 * If DMA is to be performed, exit immediately.
3283 * Otherwise, we are in polling mode, so poll
 3284 * status until the operation succeeds or fails.
3285 *
3286 * LOCKING:
3287 * Kernel thread context (may sleep)
3288 */
3289
3290static void atapi_packet_task(void *_data)
3291{
3292 struct ata_port *ap = _data;
3293 struct ata_queued_cmd *qc;
3294 u8 status;
3295
3296 qc = ata_qc_from_tag(ap, ap->active_tag);
3297 assert(qc != NULL);
3298 assert(qc->flags & ATA_QCFLAG_ACTIVE);
3299
3300 /* sleep-wait for BSY to clear */
3301 DPRINTK("busy wait\n");
3302 if (ata_busy_sleep(ap, ATA_TMOUT_CDB_QUICK, ATA_TMOUT_CDB))
3303 goto err_out;
3304
3305 /* make sure DRQ is set */
3306 status = ata_chk_status(ap);
3307 if ((status & (ATA_BUSY | ATA_DRQ)) != ATA_DRQ)
3308 goto err_out;
3309
3310 /* send SCSI cdb */
3311 DPRINTK("send cdb\n");
3312 assert(ap->cdb_len >= 12);
3313 ata_data_xfer(ap, qc->cdb, ap->cdb_len, 1);
3314
3315 /* if we are DMA'ing, irq handler takes over from here */
3316 if (qc->tf.protocol == ATA_PROT_ATAPI_DMA)
3317 ap->ops->bmdma_start(qc); /* initiate bmdma */
3318
3319 /* non-data commands are also handled via irq */
3320 else if (qc->tf.protocol == ATA_PROT_ATAPI_NODATA) {
3321 /* do nothing */
3322 }
3323
3324 /* PIO commands are handled by polling */
3325 else {
3326 ap->pio_task_state = PIO_ST;
3327 queue_work(ata_wq, &ap->pio_task);
3328 }
3329
3330 return;
3331
3332err_out:
3333 ata_qc_complete(qc, ATA_ERR);
3334}
3335
3336int ata_port_start (struct ata_port *ap)
3337{
3338 struct device *dev = ap->host_set->dev;
3339
3340 ap->prd = dma_alloc_coherent(dev, ATA_PRD_TBL_SZ, &ap->prd_dma, GFP_KERNEL);
3341 if (!ap->prd)
3342 return -ENOMEM;
3343
3344 DPRINTK("prd alloc, virt %p, dma %llx\n", ap->prd, (unsigned long long) ap->prd_dma);
3345
3346 return 0;
3347}
3348
3349void ata_port_stop (struct ata_port *ap)
3350{
3351 struct device *dev = ap->host_set->dev;
3352
3353 dma_free_coherent(dev, ATA_PRD_TBL_SZ, ap->prd, ap->prd_dma);
3354}
3355
3356/**
3357 * ata_host_remove - Unregister SCSI host structure with upper layers
3358 * @ap: Port to unregister
3359 * @do_unregister: 1 if we fully unregister, 0 to just stop the port
3360 *
 3361 * LOCKING: Inherited from caller.
 3362 */
3363
3364static void ata_host_remove(struct ata_port *ap, unsigned int do_unregister)
3365{
3366 struct Scsi_Host *sh = ap->host;
3367
3368 DPRINTK("ENTER\n");
3369
3370 if (do_unregister)
3371 scsi_remove_host(sh);
3372
3373 ap->ops->port_stop(ap);
3374}
3375
3376/**
3377 * ata_host_init - Initialize an ata_port structure
3378 * @ap: Structure to initialize
3379 * @host: associated SCSI mid-layer structure
3380 * @host_set: Collection of hosts to which @ap belongs
3381 * @ent: Probe information provided by low-level driver
3382 * @port_no: Port number associated with this ata_port
3383 *
 3384 * LOCKING:
 3385 * Inherited from caller.
3386 */
3387
3388static void ata_host_init(struct ata_port *ap, struct Scsi_Host *host,
3389 struct ata_host_set *host_set,
3390 struct ata_probe_ent *ent, unsigned int port_no)
3391{
3392 unsigned int i;
3393
3394 host->max_id = 16;
3395 host->max_lun = 1;
3396 host->max_channel = 1;
3397 host->unique_id = ata_unique_id++;
3398 host->max_cmd_len = 12;
3399 scsi_set_device(host, ent->dev);
3400 scsi_assign_lock(host, &host_set->lock);
3401
3402 ap->flags = ATA_FLAG_PORT_DISABLED;
3403 ap->id = host->unique_id;
3404 ap->host = host;
3405 ap->ctl = ATA_DEVCTL_OBS;
3406 ap->host_set = host_set;
3407 ap->port_no = port_no;
3408 ap->hard_port_no =
3409 ent->legacy_mode ? ent->hard_port_no : port_no;
3410 ap->pio_mask = ent->pio_mask;
3411 ap->mwdma_mask = ent->mwdma_mask;
3412 ap->udma_mask = ent->udma_mask;
3413 ap->flags |= ent->host_flags;
3414 ap->ops = ent->port_ops;
3415 ap->cbl = ATA_CBL_NONE;
3416 ap->active_tag = ATA_TAG_POISON;
3417 ap->last_ctl = 0xFF;
3418
3419 INIT_WORK(&ap->packet_task, atapi_packet_task, ap);
3420 INIT_WORK(&ap->pio_task, ata_pio_task, ap);
3421
3422 for (i = 0; i < ATA_MAX_DEVICES; i++)
3423 ap->device[i].devno = i;
3424
3425#ifdef ATA_IRQ_TRAP
3426 ap->stats.unhandled_irq = 1;
3427 ap->stats.idle_irq = 1;
3428#endif
3429
3430 memcpy(&ap->ioaddr, &ent->port[port_no], sizeof(struct ata_ioports));
3431}
3432
3433/**
3434 * ata_host_add - Attach low-level ATA driver to system
3435 * @ent: Information provided by low-level driver
3436 * @host_set: Collections of ports to which we add
3437 * @port_no: Port number associated with this host
3438 *
 3439 * LOCKING:
 3440 * Inherited from caller.
 3441 * RETURNS:
 3442 * New ata_port on success, NULL on error.
3443 */
3444
3445static struct ata_port * ata_host_add(struct ata_probe_ent *ent,
3446 struct ata_host_set *host_set,
3447 unsigned int port_no)
3448{
3449 struct Scsi_Host *host;
3450 struct ata_port *ap;
3451 int rc;
3452
3453 DPRINTK("ENTER\n");
3454 host = scsi_host_alloc(ent->sht, sizeof(struct ata_port));
3455 if (!host)
3456 return NULL;
3457
3458 ap = (struct ata_port *) &host->hostdata[0];
3459
3460 ata_host_init(ap, host, host_set, ent, port_no);
3461
3462 rc = ap->ops->port_start(ap);
3463 if (rc)
3464 goto err_out;
3465
3466 return ap;
3467
3468err_out:
3469 scsi_host_put(host);
3470 return NULL;
3471}
3472
3473/**
3474 * ata_device_add - Attach low-level ATA driver to system
3475 * @ent: Probe information describing each port to be attached
3476 *
3477 * LOCKING:
3478 * PCI/etc. bus probe semaphore.
3479 * RETURNS:
3480 * Number of ports registered.  Zero on error (no ports registered).
3481 */
3482
3483int ata_device_add(struct ata_probe_ent *ent)
3484{
3485 unsigned int count = 0, i;
3486 struct device *dev = ent->dev;
3487 struct ata_host_set *host_set;
3488
3489 DPRINTK("ENTER\n");
3490 /* alloc a container for our list of ATA ports (buses) */
3491 host_set = kmalloc(sizeof(struct ata_host_set) +
3492 (ent->n_ports * sizeof(void *)), GFP_KERNEL);
3493 if (!host_set)
3494 return 0;
3495 memset(host_set, 0, sizeof(struct ata_host_set) + (ent->n_ports * sizeof(void *)));
3496 spin_lock_init(&host_set->lock);
3497
3498 host_set->dev = dev;
3499 host_set->n_ports = ent->n_ports;
3500 host_set->irq = ent->irq;
3501 host_set->mmio_base = ent->mmio_base;
3502 host_set->private_data = ent->private_data;
3503 host_set->ops = ent->port_ops;
3504
3505 /* register each port bound to this device */
3506 for (i = 0; i < ent->n_ports; i++) {
3507 struct ata_port *ap;
3508 unsigned long xfer_mode_mask;
3509
3510 ap = ata_host_add(ent, host_set, i);
3511 if (!ap)
3512 goto err_out;
3513
3514 host_set->ports[i] = ap;
 3515 xfer_mode_mask = (ap->udma_mask << ATA_SHIFT_UDMA) |
3516 (ap->mwdma_mask << ATA_SHIFT_MWDMA) |
3517 (ap->pio_mask << ATA_SHIFT_PIO);
3518
3519 /* print per-port info to dmesg */
3520 printk(KERN_INFO "ata%u: %cATA max %s cmd 0x%lX ctl 0x%lX "
3521 "bmdma 0x%lX irq %lu\n",
3522 ap->id,
3523 ap->flags & ATA_FLAG_SATA ? 'S' : 'P',
3524 ata_mode_string(xfer_mode_mask),
3525 ap->ioaddr.cmd_addr,
3526 ap->ioaddr.ctl_addr,
3527 ap->ioaddr.bmdma_addr,
3528 ent->irq);
3529
3530 ata_chk_status(ap);
3531 host_set->ops->irq_clear(ap);
3532 count++;
3533 }
3534
3535 if (!count) {
3536 kfree(host_set);
3537 return 0;
3538 }
3539
3540 /* obtain irq, that is shared between channels */
3541 if (request_irq(ent->irq, ent->port_ops->irq_handler, ent->irq_flags,
3542 DRV_NAME, host_set))
3543 goto err_out;
3544
3545 /* perform each probe synchronously */
3546 DPRINTK("probe begin\n");
3547 for (i = 0; i < count; i++) {
3548 struct ata_port *ap;
3549 int rc;
3550
3551 ap = host_set->ports[i];
3552
3553 DPRINTK("ata%u: probe begin\n", ap->id);
3554 rc = ata_bus_probe(ap);
3555 DPRINTK("ata%u: probe end\n", ap->id);
3556
3557 if (rc) {
3558 /* FIXME: do something useful here?
3559 * Current libata behavior will
3560 * tear down everything when
3561 * the module is removed
3562 * or the h/w is unplugged.
3563 */
3564 }
3565
3566 rc = scsi_add_host(ap->host, dev);
3567 if (rc) {
3568 printk(KERN_ERR "ata%u: scsi_add_host failed\n",
3569 ap->id);
3570 /* FIXME: do something useful here */
3571 /* FIXME: handle unconditional calls to
3572 * scsi_scan_host and ata_host_remove, below,
3573 * at the very least
3574 */
3575 }
3576 }
3577
3578 /* probes are done, now scan each port's disk(s) */
3579 DPRINTK("probe begin\n");
3580 for (i = 0; i < count; i++) {
3581 struct ata_port *ap = host_set->ports[i];
3582
3583 scsi_scan_host(ap->host);
3584 }
3585
3586 dev_set_drvdata(dev, host_set);
3587
3588 VPRINTK("EXIT, returning %u\n", ent->n_ports);
3589 return ent->n_ports; /* success */
3590
3591err_out:
3592 for (i = 0; i < count; i++) {
3593 ata_host_remove(host_set->ports[i], 1);
3594 scsi_host_put(host_set->ports[i]->host);
3595 }
3596 kfree(host_set);
3597 VPRINTK("EXIT, returning 0\n");
3598 return 0;
3599}
3600
3601/**
3602 * ata_scsi_release - SCSI layer callback hook for host unload
3603 * @host: libata host to be unloaded
3604 *
3605 * Performs all duties necessary to shut down a libata port...
3606 * Kill port kthread, disable port, and release resources.
3607 *
3608 * LOCKING:
3609 * Inherited from SCSI layer.
3610 *
3611 * RETURNS:
3612 * One.
3613 */
3614
3615int ata_scsi_release(struct Scsi_Host *host)
3616{
3617 struct ata_port *ap = (struct ata_port *) &host->hostdata[0];
3618
3619 DPRINTK("ENTER\n");
3620
3621 ap->ops->port_disable(ap);
3622 ata_host_remove(ap, 0);
3623
3624 DPRINTK("EXIT\n");
3625 return 1;
3626}
3627
3628/**
3629 * ata_std_ports - initialize ioaddr with standard port offsets.
3630 * @ioaddr: IO address structure to be initialized
3631 */
3632void ata_std_ports(struct ata_ioports *ioaddr)
3633{
3634 ioaddr->data_addr = ioaddr->cmd_addr + ATA_REG_DATA;
3635 ioaddr->error_addr = ioaddr->cmd_addr + ATA_REG_ERR;
3636 ioaddr->feature_addr = ioaddr->cmd_addr + ATA_REG_FEATURE;
3637 ioaddr->nsect_addr = ioaddr->cmd_addr + ATA_REG_NSECT;
3638 ioaddr->lbal_addr = ioaddr->cmd_addr + ATA_REG_LBAL;
3639 ioaddr->lbam_addr = ioaddr->cmd_addr + ATA_REG_LBAM;
3640 ioaddr->lbah_addr = ioaddr->cmd_addr + ATA_REG_LBAH;
3641 ioaddr->device_addr = ioaddr->cmd_addr + ATA_REG_DEVICE;
3642 ioaddr->status_addr = ioaddr->cmd_addr + ATA_REG_STATUS;
3643 ioaddr->command_addr = ioaddr->cmd_addr + ATA_REG_CMD;
3644}
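
/*
 * Example (illustrative only): for the legacy primary channel, where
 * cmd_addr is 0x1f0, ata_std_ports() yields data_addr 0x1f0,
 * error/feature 0x1f1, nsect 0x1f2, lbal 0x1f3, lbam 0x1f4,
 * lbah 0x1f5, device 0x1f6 and status/command 0x1f7; ctl_addr (0x3f6)
 * is set separately by the caller, as in ata_pci_init_legacy_mode()
 * below.
 */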
3645
3646static struct ata_probe_ent *
3647ata_probe_ent_alloc(struct device *dev, struct ata_port_info *port)
3648{
3649 struct ata_probe_ent *probe_ent;
3650
3651 probe_ent = kmalloc(sizeof(*probe_ent), GFP_KERNEL);
3652 if (!probe_ent) {
3653 printk(KERN_ERR DRV_NAME "(%s): out of memory\n",
3654 kobject_name(&(dev->kobj)));
3655 return NULL;
3656 }
3657
3658 memset(probe_ent, 0, sizeof(*probe_ent));
3659
3660 INIT_LIST_HEAD(&probe_ent->node);
3661 probe_ent->dev = dev;
3662
3663 probe_ent->sht = port->sht;
3664 probe_ent->host_flags = port->host_flags;
3665 probe_ent->pio_mask = port->pio_mask;
3666 probe_ent->mwdma_mask = port->mwdma_mask;
3667 probe_ent->udma_mask = port->udma_mask;
3668 probe_ent->port_ops = port->port_ops;
3669
3670 return probe_ent;
3671}
3672
3673#ifdef CONFIG_PCI
3674struct ata_probe_ent *
3675ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port)
3676{
3677 struct ata_probe_ent *probe_ent =
3678 ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
3679 if (!probe_ent)
3680 return NULL;
3681
3682 probe_ent->n_ports = 2;
3683 probe_ent->irq = pdev->irq;
3684 probe_ent->irq_flags = SA_SHIRQ;
3685
3686 probe_ent->port[0].cmd_addr = pci_resource_start(pdev, 0);
3687 probe_ent->port[0].altstatus_addr =
3688 probe_ent->port[0].ctl_addr =
3689 pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
3690 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3691
3692 probe_ent->port[1].cmd_addr = pci_resource_start(pdev, 2);
3693 probe_ent->port[1].altstatus_addr =
3694 probe_ent->port[1].ctl_addr =
3695 pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
3696 probe_ent->port[1].bmdma_addr = pci_resource_start(pdev, 4) + 8;
3697
3698 ata_std_ports(&probe_ent->port[0]);
3699 ata_std_ports(&probe_ent->port[1]);
3700
3701 return probe_ent;
3702}
3703
3704static struct ata_probe_ent *
3705ata_pci_init_legacy_mode(struct pci_dev *pdev, struct ata_port_info **port,
3706 struct ata_probe_ent **ppe2)
3707{
3708 struct ata_probe_ent *probe_ent, *probe_ent2;
3709
3710 probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
3711 if (!probe_ent)
3712 return NULL;
3713 probe_ent2 = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[1]);
3714 if (!probe_ent2) {
3715 kfree(probe_ent);
3716 return NULL;
3717 }
3718
3719 probe_ent->n_ports = 1;
3720 probe_ent->irq = 14;
3721
3722 probe_ent->hard_port_no = 0;
3723 probe_ent->legacy_mode = 1;
3724
3725 probe_ent2->n_ports = 1;
3726 probe_ent2->irq = 15;
3727
3728 probe_ent2->hard_port_no = 1;
3729 probe_ent2->legacy_mode = 1;
3730
3731 probe_ent->port[0].cmd_addr = 0x1f0;
3732 probe_ent->port[0].altstatus_addr =
3733 probe_ent->port[0].ctl_addr = 0x3f6;
3734 probe_ent->port[0].bmdma_addr = pci_resource_start(pdev, 4);
3735
3736 probe_ent2->port[0].cmd_addr = 0x170;
3737 probe_ent2->port[0].altstatus_addr =
3738 probe_ent2->port[0].ctl_addr = 0x376;
3739 probe_ent2->port[0].bmdma_addr = pci_resource_start(pdev, 4)+8;
3740
3741 ata_std_ports(&probe_ent->port[0]);
3742 ata_std_ports(&probe_ent2->port[0]);
3743
3744 *ppe2 = probe_ent2;
3745 return probe_ent;
3746}
3747
3748/**
3749 * ata_pci_init_one - Initialize/register PCI IDE host controller
3750 * @pdev: Controller to be initialized
3751 * @port_info: Information from low-level host driver
3752 * @n_ports: Number of ports attached to host controller
3753 *
3754 * LOCKING:
3755 * Inherited from PCI layer (may sleep).
3756 *
 3757 * RETURNS:
 3758 * Zero on success, negative errno-based value on error.
3759 */
3760
3761int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
3762 unsigned int n_ports)
3763{
3764 struct ata_probe_ent *probe_ent, *probe_ent2 = NULL;
3765 struct ata_port_info *port[2];
3766 u8 tmp8, mask;
3767 unsigned int legacy_mode = 0;
3768 int disable_dev_on_err = 1;
3769 int rc;
3770
3771 DPRINTK("ENTER\n");
3772
3773 port[0] = port_info[0];
3774 if (n_ports > 1)
3775 port[1] = port_info[1];
3776 else
3777 port[1] = port[0];
3778
3779 if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
3780 && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
3781 /* TODO: support transitioning to native mode? */
3782 pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
3783 mask = (1 << 2) | (1 << 0);
3784 if ((tmp8 & mask) != mask)
3785 legacy_mode = (1 << 3);
3786 }
3787
3788 /* FIXME... */
3789 if ((!legacy_mode) && (n_ports > 1)) {
3790 printk(KERN_ERR "ata: BUG: native mode, n_ports > 1\n");
3791 return -EINVAL;
3792 }
3793
3794 rc = pci_enable_device(pdev);
3795 if (rc)
3796 return rc;
3797
3798 rc = pci_request_regions(pdev, DRV_NAME);
3799 if (rc) {
3800 disable_dev_on_err = 0;
3801 goto err_out;
3802 }
3803
3804 if (legacy_mode) {
3805 if (!request_region(0x1f0, 8, "libata")) {
3806 struct resource *conflict, res;
3807 res.start = 0x1f0;
3808 res.end = 0x1f0 + 8 - 1;
3809 conflict = ____request_resource(&ioport_resource, &res);
3810 if (!strcmp(conflict->name, "libata"))
3811 legacy_mode |= (1 << 0);
3812 else {
3813 disable_dev_on_err = 0;
3814 printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
3815 }
3816 } else
3817 legacy_mode |= (1 << 0);
3818
3819 if (!request_region(0x170, 8, "libata")) {
3820 struct resource *conflict, res;
3821 res.start = 0x170;
3822 res.end = 0x170 + 8 - 1;
3823 conflict = ____request_resource(&ioport_resource, &res);
3824 if (!strcmp(conflict->name, "libata"))
3825 legacy_mode |= (1 << 1);
3826 else {
3827 disable_dev_on_err = 0;
3828 printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
3829 }
3830 } else
3831 legacy_mode |= (1 << 1);
3832 }
3833
3834 /* we have legacy mode, but all ports are unavailable */
3835 if (legacy_mode == (1 << 3)) {
3836 rc = -EBUSY;
3837 goto err_out_regions;
3838 }
3839
3840 rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
3841 if (rc)
3842 goto err_out_regions;
3843 rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
3844 if (rc)
3845 goto err_out_regions;
3846
3847 if (legacy_mode) {
3848 probe_ent = ata_pci_init_legacy_mode(pdev, port, &probe_ent2);
3849 } else
3850 probe_ent = ata_pci_init_native_mode(pdev, port);
3851 if (!probe_ent) {
3852 rc = -ENOMEM;
3853 goto err_out_regions;
3854 }
3855
3856 pci_set_master(pdev);
3857
3858 /* FIXME: check ata_device_add return */
3859 if (legacy_mode) {
3860 if (legacy_mode & (1 << 0))
3861 ata_device_add(probe_ent);
3862 if (legacy_mode & (1 << 1))
3863 ata_device_add(probe_ent2);
3864 } else
3865 ata_device_add(probe_ent);
3866
3867 kfree(probe_ent);
3868 kfree(probe_ent2);
3869
3870 return 0;
3871
3872err_out_regions:
3873 if (legacy_mode & (1 << 0))
3874 release_region(0x1f0, 8);
3875 if (legacy_mode & (1 << 1))
3876 release_region(0x170, 8);
3877 pci_release_regions(pdev);
3878err_out:
3879 if (disable_dev_on_err)
3880 pci_disable_device(pdev);
3881 return rc;
3882}
3883
3884/**
3885 * ata_pci_remove_one - PCI layer callback for device removal
3886 * @pdev: PCI device that was removed
3887 *
3888 * PCI layer indicates to libata via this hook that
 3889 * hot-unplug or module unload event has occurred.
3890 * Handle this by unregistering all objects associated
3891 * with this PCI device. Free those objects. Then finally
3892 * release PCI resources and disable device.
3893 *
3894 * LOCKING:
3895 * Inherited from PCI layer (may sleep).
3896 */
3897
3898void ata_pci_remove_one (struct pci_dev *pdev)
3899{
3900 struct device *dev = pci_dev_to_dev(pdev);
3901 struct ata_host_set *host_set = dev_get_drvdata(dev);
3902 struct ata_port *ap;
3903 unsigned int i;
3904
3905 for (i = 0; i < host_set->n_ports; i++) {
3906 ap = host_set->ports[i];
3907
3908 scsi_remove_host(ap->host);
3909 }
3910
3911 free_irq(host_set->irq, host_set);
3912 if (host_set->ops->host_stop)
3913 host_set->ops->host_stop(host_set);
3914 if (host_set->mmio_base)
3915 iounmap(host_set->mmio_base);
3916
3917 for (i = 0; i < host_set->n_ports; i++) {
3918 ap = host_set->ports[i];
3919
3920 ata_scsi_release(ap->host);
3921
3922 if ((ap->flags & ATA_FLAG_NO_LEGACY) == 0) {
3923 struct ata_ioports *ioaddr = &ap->ioaddr;
3924
3925 if (ioaddr->cmd_addr == 0x1f0)
3926 release_region(0x1f0, 8);
3927 else if (ioaddr->cmd_addr == 0x170)
3928 release_region(0x170, 8);
3929 }
3930
3931 scsi_host_put(ap->host);
3932 }
3933
3934 kfree(host_set);
3935
3936 pci_release_regions(pdev);
3937 pci_disable_device(pdev);
3938 dev_set_drvdata(dev, NULL);
3939}
3940
3941/* move to PCI subsystem */
3942int pci_test_config_bits(struct pci_dev *pdev, struct pci_bits *bits)
3943{
3944 unsigned long tmp = 0;
3945
3946 switch (bits->width) {
3947 case 1: {
3948 u8 tmp8 = 0;
3949 pci_read_config_byte(pdev, bits->reg, &tmp8);
3950 tmp = tmp8;
3951 break;
3952 }
3953 case 2: {
3954 u16 tmp16 = 0;
3955 pci_read_config_word(pdev, bits->reg, &tmp16);
3956 tmp = tmp16;
3957 break;
3958 }
3959 case 4: {
3960 u32 tmp32 = 0;
3961 pci_read_config_dword(pdev, bits->reg, &tmp32);
3962 tmp = tmp32;
3963 break;
3964 }
3965
3966 default:
3967 return -EINVAL;
3968 }
3969
3970 tmp &= bits->mask;
3971
3972 return (tmp == bits->val) ? 1 : 0;
3973}
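
/*
 * Example (sketch, not part of libata): a PCI IDE driver probing
 * whether a port is enabled might use pci_test_config_bits() as in
 * the hypothetical snippet below -- the register offset, mask and
 * value are made up for illustration.
 */
#if 0 /* illustrative only */
static struct pci_bits example_port_enable_bits = {
	0x41,		/* reg: config-space offset to test */
	1,		/* width: one byte */
	0x80,		/* mask: port-enable bit */
	0x80,		/* val: expected value when enabled */
};

static int example_port_enabled(struct pci_dev *pdev)
{
	/* returns 1 if enabled, 0 if not, -EINVAL on bad width */
	return pci_test_config_bits(pdev, &example_port_enable_bits);
}
#endif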
3974#endif /* CONFIG_PCI */
3975
3976
3977/**
3978 * ata_init - module initialization; create the libata workqueue
3979 *
3980 * LOCKING:
3981 * None (module load context).
3982 * RETURNS:
3983 * Zero on success, -ENOMEM if the workqueue cannot be created.
3984 */
3985
3986static int __init ata_init(void)
3987{
3988 ata_wq = create_workqueue("ata");
3989 if (!ata_wq)
3990 return -ENOMEM;
3991
3992 printk(KERN_DEBUG "libata version " DRV_VERSION " loaded.\n");
3993 return 0;
3994}
3995
3996static void __exit ata_exit(void)
3997{
3998 destroy_workqueue(ata_wq);
3999}
4000
4001module_init(ata_init);
4002module_exit(ata_exit);
4003
4004/*
4005 * libata is essentially a library of internal helper functions for
4006 * low-level ATA host controller drivers. As such, the API/ABI is
4007 * likely to change as new drivers are added and updated.
4008 * Do not depend on ABI/API stability.
4009 */
4010
4011EXPORT_SYMBOL_GPL(ata_std_bios_param);
4012EXPORT_SYMBOL_GPL(ata_std_ports);
4013EXPORT_SYMBOL_GPL(ata_device_add);
4014EXPORT_SYMBOL_GPL(ata_sg_init);
4015EXPORT_SYMBOL_GPL(ata_sg_init_one);
4016EXPORT_SYMBOL_GPL(ata_qc_complete);
4017EXPORT_SYMBOL_GPL(ata_qc_issue_prot);
4018EXPORT_SYMBOL_GPL(ata_eng_timeout);
4019EXPORT_SYMBOL_GPL(ata_tf_load);
4020EXPORT_SYMBOL_GPL(ata_tf_read);
4021EXPORT_SYMBOL_GPL(ata_noop_dev_select);
4022EXPORT_SYMBOL_GPL(ata_std_dev_select);
4023EXPORT_SYMBOL_GPL(ata_tf_to_fis);
4024EXPORT_SYMBOL_GPL(ata_tf_from_fis);
4025EXPORT_SYMBOL_GPL(ata_check_status);
4026EXPORT_SYMBOL_GPL(ata_altstatus);
4027EXPORT_SYMBOL_GPL(ata_chk_err);
4028EXPORT_SYMBOL_GPL(ata_exec_command);
4029EXPORT_SYMBOL_GPL(ata_port_start);
4030EXPORT_SYMBOL_GPL(ata_port_stop);
4031EXPORT_SYMBOL_GPL(ata_interrupt);
4032EXPORT_SYMBOL_GPL(ata_qc_prep);
4033EXPORT_SYMBOL_GPL(ata_bmdma_setup);
4034EXPORT_SYMBOL_GPL(ata_bmdma_start);
4035EXPORT_SYMBOL_GPL(ata_bmdma_irq_clear);
4036EXPORT_SYMBOL_GPL(ata_bmdma_status);
4037EXPORT_SYMBOL_GPL(ata_bmdma_stop);
4038EXPORT_SYMBOL_GPL(ata_port_probe);
4039EXPORT_SYMBOL_GPL(sata_phy_reset);
4040EXPORT_SYMBOL_GPL(__sata_phy_reset);
4041EXPORT_SYMBOL_GPL(ata_bus_reset);
4042EXPORT_SYMBOL_GPL(ata_port_disable);
4043EXPORT_SYMBOL_GPL(ata_scsi_ioctl);
4044EXPORT_SYMBOL_GPL(ata_scsi_queuecmd);
4045EXPORT_SYMBOL_GPL(ata_scsi_error);
4046EXPORT_SYMBOL_GPL(ata_scsi_slave_config);
4047EXPORT_SYMBOL_GPL(ata_scsi_release);
4048EXPORT_SYMBOL_GPL(ata_host_intr);
4049EXPORT_SYMBOL_GPL(ata_dev_classify);
4050EXPORT_SYMBOL_GPL(ata_dev_id_string);
4051EXPORT_SYMBOL_GPL(ata_scsi_simulate);
4052
4053#ifdef CONFIG_PCI
4054EXPORT_SYMBOL_GPL(pci_test_config_bits);
4055EXPORT_SYMBOL_GPL(ata_pci_init_native_mode);
4056EXPORT_SYMBOL_GPL(ata_pci_init_one);
4057EXPORT_SYMBOL_GPL(ata_pci_remove_one);
4058#endif /* CONFIG_PCI */