/*
 * libata-bmdma.c - helper library for PCI IDE BMDMA
 *
 * Maintained by:  Jeff Garzik <jgarzik@pobox.com>
 *                 Please ALWAYS copy linux-ide@vger.kernel.org
 *                 on emails.
 *
 * Copyright 2003-2006 Red Hat, Inc.  All rights reserved.
 * Copyright 2003-2006 Jeff Garzik
 *
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 *
 * libata documentation is available via 'make {ps|pdf}docs',
 * as Documentation/DocBook/libata.*
 *
 * Hardware documentation available from http://www.t13.org/ and
 * http://www.sata-io.org/
 *
 */

#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/libata.h>

#include "libata.h"

/**
 * ata_tf_load_pio - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_load_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

        if (tf->ctl != ap->last_ctl) {
                outb(tf->ctl, ioaddr->ctl_addr);
                ap->last_ctl = tf->ctl;
                ata_wait_idle(ap);
        }

        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
                outb(tf->hob_feature, ioaddr->feature_addr);
                outb(tf->hob_nsect, ioaddr->nsect_addr);
                outb(tf->hob_lbal, ioaddr->lbal_addr);
                outb(tf->hob_lbam, ioaddr->lbam_addr);
                outb(tf->hob_lbah, ioaddr->lbah_addr);
                VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
                        tf->hob_feature,
                        tf->hob_nsect,
                        tf->hob_lbal,
                        tf->hob_lbam,
                        tf->hob_lbah);
        }

        if (is_addr) {
                outb(tf->feature, ioaddr->feature_addr);
                outb(tf->nsect, ioaddr->nsect_addr);
                outb(tf->lbal, ioaddr->lbal_addr);
                outb(tf->lbam, ioaddr->lbam_addr);
                outb(tf->lbah, ioaddr->lbah_addr);
                VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
                        tf->feature,
                        tf->nsect,
                        tf->lbal,
                        tf->lbam,
                        tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE) {
                outb(tf->device, ioaddr->device_addr);
                VPRINTK("device 0x%X\n", tf->device);
        }

        ata_wait_idle(ap);
}

/**
 * ata_tf_load_mmio - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_load_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;
        unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;

        if (tf->ctl != ap->last_ctl) {
                writeb(tf->ctl, (void __iomem *) ap->ioaddr.ctl_addr);
                ap->last_ctl = tf->ctl;
                ata_wait_idle(ap);
        }

        if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
                writeb(tf->hob_feature, (void __iomem *) ioaddr->feature_addr);
                writeb(tf->hob_nsect, (void __iomem *) ioaddr->nsect_addr);
                writeb(tf->hob_lbal, (void __iomem *) ioaddr->lbal_addr);
                writeb(tf->hob_lbam, (void __iomem *) ioaddr->lbam_addr);
                writeb(tf->hob_lbah, (void __iomem *) ioaddr->lbah_addr);
                VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
                        tf->hob_feature,
                        tf->hob_nsect,
                        tf->hob_lbal,
                        tf->hob_lbam,
                        tf->hob_lbah);
        }

        if (is_addr) {
                writeb(tf->feature, (void __iomem *) ioaddr->feature_addr);
                writeb(tf->nsect, (void __iomem *) ioaddr->nsect_addr);
                writeb(tf->lbal, (void __iomem *) ioaddr->lbal_addr);
                writeb(tf->lbam, (void __iomem *) ioaddr->lbam_addr);
                writeb(tf->lbah, (void __iomem *) ioaddr->lbah_addr);
                VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
                        tf->feature,
                        tf->nsect,
                        tf->lbal,
                        tf->lbam,
                        tf->lbah);
        }

        if (tf->flags & ATA_TFLAG_DEVICE) {
                writeb(tf->device, (void __iomem *) ioaddr->device_addr);
                VPRINTK("device 0x%X\n", tf->device);
        }

        ata_wait_idle(ap);
}


/**
 * ata_tf_load - send taskfile registers to host controller
 * @ap: Port to which output is sent
 * @tf: ATA taskfile register set
 *
 * Outputs ATA taskfile to standard ATA host controller using MMIO
 * or PIO as indicated by the ATA_FLAG_MMIO flag.
 * Writes the control, feature, nsect, lbal, lbam, and lbah registers.
 * Optionally (ATA_TFLAG_LBA48) writes hob_feature, hob_nsect,
 * hob_lbal, hob_lbam, and hob_lbah.
 *
 * This function waits for idle (!BUSY and !DRQ) after writing
 * registers.  If the control register has a new value, this
 * function also waits for idle after writing control and before
 * writing the remaining registers.
 *
 * May be used as the tf_load() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf)
{
        if (ap->flags & ATA_FLAG_MMIO)
                ata_tf_load_mmio(ap, tf);
        else
                ata_tf_load_pio(ap, tf);
}
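
/*
 * Illustrative example (hypothetical driver code, not part of the original
 * file): as the kernel-doc above notes, a low-level driver for a
 * conventional SFF/BMDMA controller usually plugs these taskfile helpers
 * straight into its ata_port_operations.  The "foo_ops" name and the field
 * subset shown are made up for the sketch.
 *
 *	static const struct ata_port_operations foo_ops = {
 *		.tf_load	= ata_tf_load,
 *		.tf_read	= ata_tf_read,
 *		.check_status	= ata_check_status,
 *		.exec_command	= ata_exec_command,
 *		... remaining hooks omitted ...
 *	};
 */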

/**
 * ata_exec_command_pio - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues PIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_pio(struct ata_port *ap, const struct ata_taskfile *tf)
{
        DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

        outb(tf->command, ap->ioaddr.command_addr);
        ata_pause(ap);
}


/**
 * ata_exec_command_mmio - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues MMIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * FIXME: missing write posting for 400nS delay enforcement
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_exec_command_mmio(struct ata_port *ap, const struct ata_taskfile *tf)
{
        DPRINTK("ata%u: cmd 0x%X\n", ap->id, tf->command);

        writeb(tf->command, (void __iomem *) ap->ioaddr.command_addr);
        ata_pause(ap);
}


/**
 * ata_exec_command - issue ATA command to host controller
 * @ap: port to which command is being issued
 * @tf: ATA taskfile register set
 *
 * Issues PIO/MMIO write to ATA command register, with proper
 * synchronization with interrupt handler / other threads.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_exec_command(struct ata_port *ap, const struct ata_taskfile *tf)
{
        if (ap->flags & ATA_FLAG_MMIO)
                ata_exec_command_mmio(ap, tf);
        else
                ata_exec_command_pio(ap, tf);
}

/**
 * ata_tf_read_pio - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_read_pio(struct ata_port *ap, struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        tf->command = ata_check_status(ap);
        tf->feature = inb(ioaddr->error_addr);
        tf->nsect = inb(ioaddr->nsect_addr);
        tf->lbal = inb(ioaddr->lbal_addr);
        tf->lbam = inb(ioaddr->lbam_addr);
        tf->lbah = inb(ioaddr->lbah_addr);
        tf->device = inb(ioaddr->device_addr);

        if (tf->flags & ATA_TFLAG_LBA48) {
                outb(tf->ctl | ATA_HOB, ioaddr->ctl_addr);
                tf->hob_feature = inb(ioaddr->error_addr);
                tf->hob_nsect = inb(ioaddr->nsect_addr);
                tf->hob_lbal = inb(ioaddr->lbal_addr);
                tf->hob_lbam = inb(ioaddr->lbam_addr);
                tf->hob_lbah = inb(ioaddr->lbah_addr);
        }
}

/**
 * ata_tf_read_mmio - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf via MMIO.
 *
 * LOCKING:
 * Inherited from caller.
 */

static void ata_tf_read_mmio(struct ata_port *ap, struct ata_taskfile *tf)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        tf->command = ata_check_status(ap);
        tf->feature = readb((void __iomem *)ioaddr->error_addr);
        tf->nsect = readb((void __iomem *)ioaddr->nsect_addr);
        tf->lbal = readb((void __iomem *)ioaddr->lbal_addr);
        tf->lbam = readb((void __iomem *)ioaddr->lbam_addr);
        tf->lbah = readb((void __iomem *)ioaddr->lbah_addr);
        tf->device = readb((void __iomem *)ioaddr->device_addr);

        if (tf->flags & ATA_TFLAG_LBA48) {
                writeb(tf->ctl | ATA_HOB, (void __iomem *) ap->ioaddr.ctl_addr);
                tf->hob_feature = readb((void __iomem *)ioaddr->error_addr);
                tf->hob_nsect = readb((void __iomem *)ioaddr->nsect_addr);
                tf->hob_lbal = readb((void __iomem *)ioaddr->lbal_addr);
                tf->hob_lbam = readb((void __iomem *)ioaddr->lbam_addr);
                tf->hob_lbah = readb((void __iomem *)ioaddr->lbah_addr);
        }
}


/**
 * ata_tf_read - input device's ATA taskfile shadow registers
 * @ap: Port from which input is read
 * @tf: ATA taskfile register set for storing input
 *
 * Reads ATA taskfile registers for currently-selected device
 * into @tf.
 *
 * Reads nsect, lbal, lbam, lbah, and device.  If ATA_TFLAG_LBA48
 * is set, also reads the hob registers.
 *
 * May be used as the tf_read() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_tf_read(struct ata_port *ap, struct ata_taskfile *tf)
{
        if (ap->flags & ATA_FLAG_MMIO)
                ata_tf_read_mmio(ap, tf);
        else
                ata_tf_read_pio(ap, tf);
}

/**
 * ata_check_status_pio - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and returns its value.  This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_check_status_pio(struct ata_port *ap)
{
        return inb(ap->ioaddr.status_addr);
}

/**
 * ata_check_status_mmio - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * via MMIO and returns its value.  This also clears pending interrupts
 * from this device.
 *
 * LOCKING:
 * Inherited from caller.
 */
static u8 ata_check_status_mmio(struct ata_port *ap)
{
        return readb((void __iomem *) ap->ioaddr.status_addr);
}


/**
 * ata_check_status - Read device status reg & clear interrupt
 * @ap: port where the device is
 *
 * Reads ATA taskfile status register for currently-selected device
 * and returns its value.  This also clears pending interrupts
 * from this device.
 *
 * May be used as the check_status() entry in ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_check_status(struct ata_port *ap)
{
        if (ap->flags & ATA_FLAG_MMIO)
                return ata_check_status_mmio(ap);
        return ata_check_status_pio(ap);
}


/**
 * ata_altstatus - Read device alternate status reg
 * @ap: port where the device is
 *
 * Reads ATA taskfile alternate status register for
 * currently-selected device and returns its value.
 *
 * Note: may NOT be used as the check_altstatus() entry in
 * ata_port_operations.
 *
 * LOCKING:
 * Inherited from caller.
 */
u8 ata_altstatus(struct ata_port *ap)
{
        if (ap->ops->check_altstatus)
                return ap->ops->check_altstatus(ap);

        if (ap->flags & ATA_FLAG_MMIO)
                return readb((void __iomem *)ap->ioaddr.altstatus_addr);
        return inb(ap->ioaddr.altstatus_addr);
}

/**
 * ata_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_setup_mmio (struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
        u8 dmactl;
        void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

        /* load PRD table addr. */
        mb();   /* make sure PRD table writes are visible to controller */
        writel(ap->prd_dma, mmio + ATA_DMA_TABLE_OFS);

        /* specify data direction, triple-check start bit is clear */
        dmactl = readb(mmio + ATA_DMA_CMD);
        dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
        if (!rw)
                dmactl |= ATA_DMA_WR;
        writeb(dmactl, mmio + ATA_DMA_CMD);

        /* issue r/w command */
        ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start_mmio - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_start_mmio (struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
        u8 dmactl;

        /* start host DMA transaction */
        dmactl = readb(mmio + ATA_DMA_CMD);
        writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD);

        /* Strictly, one may wish to issue a readb() here, to
         * flush the mmio write.  However, control also passes
         * to the hardware at this point, and it will interrupt
         * us when we are to resume control.  So, in effect,
         * we don't care when the mmio write flushes.
         * Further, a read of the DMA status register _immediately_
         * following the write may not be what certain flaky hardware
         * is expecting, so I think it is best not to add a readb()
         * without first testing all the MMIO ATA cards/mobos.
         * Or maybe I'm just being paranoid.
         */
}

/**
 * ata_bmdma_setup_pio - Set up PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_setup_pio (struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
        u8 dmactl;

        /* load PRD table addr. */
        outl(ap->prd_dma, ap->ioaddr.bmdma_addr + ATA_DMA_TABLE_OFS);

        /* specify data direction, triple-check start bit is clear */
        dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
        if (!rw)
                dmactl |= ATA_DMA_WR;
        outb(dmactl, ap->ioaddr.bmdma_addr + ATA_DMA_CMD);

        /* issue r/w command */
        ap->ops->exec_command(ap, &qc->tf);
}

/**
 * ata_bmdma_start_pio - Start a PCI IDE BMDMA transaction (PIO)
 * @qc: Info associated with this ATA transaction.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

static void ata_bmdma_start_pio (struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        u8 dmactl;

        /* start host DMA transaction */
        dmactl = inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        outb(dmactl | ATA_DMA_START,
             ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
}


/**
 * ata_bmdma_start - Start a PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Writes the ATA_DMA_START flag to the DMA command register.
 *
 * May be used as the bmdma_start() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_start(struct ata_queued_cmd *qc)
{
        if (qc->ap->flags & ATA_FLAG_MMIO)
                ata_bmdma_start_mmio(qc);
        else
                ata_bmdma_start_pio(qc);
}


/**
 * ata_bmdma_setup - Set up PCI IDE BMDMA transaction
 * @qc: Info associated with this ATA transaction.
 *
 * Writes address of PRD table to device's PRD Table Address
 * register, sets the DMA control register, and calls
 * ops->exec_command() to start the transfer.
 *
 * May be used as the bmdma_setup() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */
void ata_bmdma_setup(struct ata_queued_cmd *qc)
{
        if (qc->ap->flags & ATA_FLAG_MMIO)
                ata_bmdma_setup_mmio(qc);
        else
                ata_bmdma_setup_pio(qc);
}
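
/*
 * Usage sketch (hypothetical, heavily simplified): for a DMA command the
 * libata core invokes the taskfile and BMDMA hooks in the following order.
 * ata_bmdma_setup() already writes the ATA command via ops->exec_command(),
 * so ata_bmdma_start() only has to set the start bit afterwards:
 *
 *	ap->ops->tf_load(ap, &qc->tf);
 *	ap->ops->bmdma_setup(qc);
 *	ap->ops->bmdma_start(qc);
 */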


/**
 * ata_bmdma_irq_clear - Clear PCI IDE BMDMA interrupt.
 * @ap: Port associated with this ATA transaction.
 *
 * Clear interrupt and error flags in DMA status register.
 *
 * May be used as the irq_clear() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

void ata_bmdma_irq_clear(struct ata_port *ap)
{
        if (!ap->ioaddr.bmdma_addr)
                return;

        if (ap->flags & ATA_FLAG_MMIO) {
                void __iomem *mmio =
                        ((void __iomem *) ap->ioaddr.bmdma_addr) + ATA_DMA_STATUS;
                writeb(readb(mmio), mmio);
        } else {
                unsigned long addr = ap->ioaddr.bmdma_addr + ATA_DMA_STATUS;
                outb(inb(addr), addr);
        }
}


/**
 * ata_bmdma_status - Read PCI IDE BMDMA status
 * @ap: Port associated with this ATA transaction.
 *
 * Read and return BMDMA status register.
 *
 * May be used as the bmdma_status() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

u8 ata_bmdma_status(struct ata_port *ap)
{
        u8 host_stat;
        if (ap->flags & ATA_FLAG_MMIO) {
                void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;
                host_stat = readb(mmio + ATA_DMA_STATUS);
        } else
                host_stat = inb(ap->ioaddr.bmdma_addr + ATA_DMA_STATUS);
        return host_stat;
}


/**
 * ata_bmdma_stop - Stop PCI IDE BMDMA transfer
 * @qc: Command we are ending DMA for
 *
 * Clears the ATA_DMA_START flag in the dma control register
 *
 * May be used as the bmdma_stop() entry in ata_port_operations.
 *
 * LOCKING:
 * spin_lock_irqsave(host_set lock)
 */

void ata_bmdma_stop(struct ata_queued_cmd *qc)
{
        struct ata_port *ap = qc->ap;
        if (ap->flags & ATA_FLAG_MMIO) {
                void __iomem *mmio = (void __iomem *) ap->ioaddr.bmdma_addr;

                /* clear start/stop bit */
                writeb(readb(mmio + ATA_DMA_CMD) & ~ATA_DMA_START,
                        mmio + ATA_DMA_CMD);
        } else {
                /* clear start/stop bit */
                outb(inb(ap->ioaddr.bmdma_addr + ATA_DMA_CMD) & ~ATA_DMA_START,
                        ap->ioaddr.bmdma_addr + ATA_DMA_CMD);
        }

        /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
        ata_altstatus(ap);        /* dummy read */
}
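
/*
 * Usage sketch (hypothetical, simplified): on completion of a DMA command
 * the interrupt path typically samples the BMDMA status, stops the engine,
 * and acknowledges the interrupt before completing the qc:
 *
 *	host_stat = ap->ops->bmdma_status(ap);
 *	ap->ops->bmdma_stop(qc);
 *	ap->ops->irq_clear(ap);
 */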

/**
 * ata_bmdma_freeze - Freeze BMDMA controller port
 * @ap: port to freeze
 *
 * Freeze BMDMA controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_bmdma_freeze(struct ata_port *ap)
{
        struct ata_ioports *ioaddr = &ap->ioaddr;

        ap->ctl |= ATA_NIEN;
        ap->last_ctl = ap->ctl;

        if (ap->flags & ATA_FLAG_MMIO)
                writeb(ap->ctl, (void __iomem *)ioaddr->ctl_addr);
        else
                outb(ap->ctl, ioaddr->ctl_addr);
}

/**
 * ata_bmdma_thaw - Thaw BMDMA controller port
 * @ap: port to thaw
 *
 * Thaw BMDMA controller port.
 *
 * LOCKING:
 * Inherited from caller.
 */
void ata_bmdma_thaw(struct ata_port *ap)
{
        /* clear & re-enable interrupts */
        ata_chk_status(ap);
        ap->ops->irq_clear(ap);
        if (ap->ioaddr.ctl_addr)        /* FIXME: hack. create a hook instead */
                ata_irq_on(ap);
}

/**
 * ata_bmdma_drive_eh - Perform EH with given methods for BMDMA controller
 * @ap: port to handle error for
 * @prereset: prereset method (can be NULL)
 * @softreset: softreset method (can be NULL)
 * @hardreset: hardreset method (can be NULL)
 * @postreset: postreset method (can be NULL)
 *
 * Handle error for ATA BMDMA controller.  It can handle both
 * PATA and SATA controllers.  Many controllers should be able to
 * use this EH as-is or with some added handling before and
 * after.
 *
 * This function is intended to be used by low-level drivers for
 * constructing their ->error_handler callback.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_drive_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
                        ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
                        ata_postreset_fn_t postreset)
{
        struct ata_eh_context *ehc = &ap->eh_context;
        struct ata_queued_cmd *qc;
        unsigned long flags;
        int thaw = 0;

        qc = __ata_qc_from_tag(ap, ap->active_tag);
        if (qc && !(qc->flags & ATA_QCFLAG_FAILED))
                qc = NULL;

        /* reset PIO HSM and stop DMA engine */
        spin_lock_irqsave(ap->lock, flags);

        ap->hsm_task_state = HSM_ST_IDLE;

        if (qc && (qc->tf.protocol == ATA_PROT_DMA ||
                   qc->tf.protocol == ATA_PROT_ATAPI_DMA)) {
                u8 host_stat;

                host_stat = ata_bmdma_status(ap);

                ata_ehi_push_desc(&ehc->i, "BMDMA stat 0x%x", host_stat);

                /* BMDMA controllers indicate host bus error by
                 * setting DMA_ERR bit and timing out.  As it wasn't
                 * really a timeout event, adjust error mask and
                 * cancel frozen state.
                 */
                if (qc->err_mask == AC_ERR_TIMEOUT && host_stat & ATA_DMA_ERR) {
                        qc->err_mask = AC_ERR_HOST_BUS;
                        thaw = 1;
                }

                ap->ops->bmdma_stop(qc);
        }

        ata_altstatus(ap);
        ata_chk_status(ap);
        ap->ops->irq_clear(ap);

        spin_unlock_irqrestore(ap->lock, flags);

        if (thaw)
                ata_eh_thaw_port(ap);

        /* PIO and DMA engines have been stopped, perform recovery */
        ata_do_eh(ap, prereset, softreset, hardreset, postreset);
}
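
/*
 * Illustrative example (hypothetical driver code): a low-level driver with
 * controller-specific reset requirements can build its ->error_handler on
 * top of ata_bmdma_drive_eh(), substituting its own methods where needed;
 * foo_error_handler() and foo_softreset() below are made-up names.
 *
 *	static void foo_error_handler(struct ata_port *ap)
 *	{
 *		ata_bmdma_drive_eh(ap, ata_std_prereset, foo_softreset,
 *				   NULL, ata_std_postreset);
 *	}
 */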

/**
 * ata_bmdma_error_handler - Stock error handler for BMDMA controller
 * @ap: port to handle error for
 *
 * Stock error handler for BMDMA controller.
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_error_handler(struct ata_port *ap)
{
        ata_reset_fn_t hardreset;

        hardreset = NULL;
        if (sata_scr_valid(ap))
                hardreset = sata_std_hardreset;

        ata_bmdma_drive_eh(ap, ata_std_prereset, ata_std_softreset, hardreset,
                           ata_std_postreset);
}

/**
 * ata_bmdma_post_internal_cmd - Stock post_internal_cmd for
 *	BMDMA controller
 * @qc: internal command to clean up
 *
 * LOCKING:
 * Kernel thread context (may sleep)
 */
void ata_bmdma_post_internal_cmd(struct ata_queued_cmd *qc)
{
        ata_bmdma_stop(qc);
}

#ifdef CONFIG_PCI
/**
 * ata_pci_init_native_mode - Initialize native-mode driver
 * @pdev: pci device to be initialized
 * @port: array[2] of pointers to port info structures.
 * @ports: bitmap of ports present
 *
 * Utility function which allocates and initializes an
 * ata_probe_ent structure for a standard dual-port
 * PIO-based IDE controller.  The returned ata_probe_ent
 * structure can be passed to ata_device_add() and should
 * then be freed with kfree().
 *
 * The caller need only pass the address of the primary port; the
 * secondary will be deduced automatically.  If the device has
 * non-standard secondary port mappings this function can be called
 * twice, once for each interface.
 */

struct ata_probe_ent *
ata_pci_init_native_mode(struct pci_dev *pdev, struct ata_port_info **port, int ports)
{
        struct ata_probe_ent *probe_ent =
                ata_probe_ent_alloc(pci_dev_to_dev(pdev), port[0]);
        int p = 0;
        unsigned long bmdma;

        if (!probe_ent)
                return NULL;

        probe_ent->irq = pdev->irq;
        probe_ent->irq_flags = IRQF_SHARED;
        probe_ent->private_data = port[0]->private_data;

        if (ports & ATA_PORT_PRIMARY) {
                probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 0);
                probe_ent->port[p].altstatus_addr =
                probe_ent->port[p].ctl_addr =
                        pci_resource_start(pdev, 1) | ATA_PCI_CTL_OFS;
                bmdma = pci_resource_start(pdev, 4);
                if (bmdma) {
                        if (inb(bmdma + 2) & 0x80)
                                probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
                        probe_ent->port[p].bmdma_addr = bmdma;
                }
                ata_std_ports(&probe_ent->port[p]);
                p++;
        }

        if (ports & ATA_PORT_SECONDARY) {
                probe_ent->port[p].cmd_addr = pci_resource_start(pdev, 2);
                probe_ent->port[p].altstatus_addr =
                probe_ent->port[p].ctl_addr =
                        pci_resource_start(pdev, 3) | ATA_PCI_CTL_OFS;
                bmdma = pci_resource_start(pdev, 4);
                if (bmdma) {
                        bmdma += 8;
                        if (inb(bmdma + 2) & 0x80)
                                probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
                        probe_ent->port[p].bmdma_addr = bmdma;
                }
                ata_std_ports(&probe_ent->port[p]);
                p++;
        }

        probe_ent->n_ports = p;
        return probe_ent;
}
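
/*
 * Illustrative example (hypothetical driver code): a native-mode driver
 * would typically use the helper like this in its init_one() routine;
 * "foo_port_info" is an assumed driver-provided ata_port_info.
 *
 *	struct ata_port_info *ppi[2] = { &foo_port_info, &foo_port_info };
 *	struct ata_probe_ent *probe_ent;
 *
 *	probe_ent = ata_pci_init_native_mode(pdev, ppi,
 *				ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
 *	if (!probe_ent)
 *		return -ENOMEM;
 *	ata_device_add(probe_ent);
 *	kfree(probe_ent);
 */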


static struct ata_probe_ent *ata_pci_init_legacy_port(struct pci_dev *pdev,
                                struct ata_port_info *port, int port_num)
{
        struct ata_probe_ent *probe_ent;
        unsigned long bmdma;

        probe_ent = ata_probe_ent_alloc(pci_dev_to_dev(pdev), port);
        if (!probe_ent)
                return NULL;

        probe_ent->legacy_mode = 1;
        probe_ent->n_ports = 1;
        probe_ent->hard_port_no = port_num;
        probe_ent->private_data = port->private_data;

        switch(port_num)
        {
                case 0:
                        probe_ent->irq = 14;
                        probe_ent->port[0].cmd_addr = 0x1f0;
                        probe_ent->port[0].altstatus_addr =
                        probe_ent->port[0].ctl_addr = 0x3f6;
                        break;
                case 1:
                        probe_ent->irq = 15;
                        probe_ent->port[0].cmd_addr = 0x170;
                        probe_ent->port[0].altstatus_addr =
                        probe_ent->port[0].ctl_addr = 0x376;
                        break;
        }

        bmdma = pci_resource_start(pdev, 4);
        if (bmdma != 0) {
                bmdma += 8 * port_num;
                probe_ent->port[0].bmdma_addr = bmdma;
                if (inb(bmdma + 2) & 0x80)
                        probe_ent->host_set_flags |= ATA_HOST_SIMPLEX;
        }
        ata_std_ports(&probe_ent->port[0]);

        return probe_ent;
}


/**
 * ata_pci_init_one - Initialize/register PCI IDE host controller
 * @pdev: Controller to be initialized
 * @port_info: Information from low-level host driver
 * @n_ports: Number of ports attached to host controller
 *
 * This is a helper function which can be called from a driver's
 * xxx_init_one() probe function if the hardware uses traditional
 * IDE taskfile registers.
 *
 * This function calls pci_enable_device(), reserves its register
 * regions, sets the dma mask, enables bus master mode, and calls
 * ata_device_add().
 *
 * LOCKING:
 * Inherited from PCI layer (may sleep).
 *
 * RETURNS:
 * Zero on success, negative errno-based value on error.
 */

int ata_pci_init_one (struct pci_dev *pdev, struct ata_port_info **port_info,
                      unsigned int n_ports)
{
        struct ata_probe_ent *probe_ent = NULL, *probe_ent2 = NULL;
        struct ata_port_info *port[2];
        u8 tmp8, mask;
        unsigned int legacy_mode = 0;
        int disable_dev_on_err = 1;
        int rc;

        DPRINTK("ENTER\n");

        port[0] = port_info[0];
        if (n_ports > 1)
                port[1] = port_info[1];
        else
                port[1] = port[0];

        if ((port[0]->host_flags & ATA_FLAG_NO_LEGACY) == 0
            && (pdev->class >> 8) == PCI_CLASS_STORAGE_IDE) {
                /* TODO: What if one channel is in native mode ... */
                pci_read_config_byte(pdev, PCI_CLASS_PROG, &tmp8);
                mask = (1 << 2) | (1 << 0);
                if ((tmp8 & mask) != mask)
                        legacy_mode = (1 << 3);
        }

        /* FIXME... */
        if ((!legacy_mode) && (n_ports > 2)) {
                printk(KERN_ERR "ata: BUG: native mode, n_ports > 2\n");
                n_ports = 2;
                /* For now */
        }

        /* FIXME: Really for ATA it isn't safe because the device may be
           multi-purpose and we want to leave it alone if it was already
           enabled.  Secondly for shared use as Arjan says we want refcounting

           Checking dev->is_enabled is insufficient as this is not set at
           boot for the primary video which is BIOS enabled
         */

        rc = pci_enable_device(pdev);
        if (rc)
                return rc;

        rc = pci_request_regions(pdev, DRV_NAME);
        if (rc) {
                disable_dev_on_err = 0;
                goto err_out;
        }

        /* FIXME: Should use platform specific mappers for legacy port ranges */
        if (legacy_mode) {
                if (!request_region(0x1f0, 8, "libata")) {
                        struct resource *conflict, res;
                        res.start = 0x1f0;
                        res.end = 0x1f0 + 8 - 1;
                        conflict = ____request_resource(&ioport_resource, &res);
                        if (!strcmp(conflict->name, "libata"))
                                legacy_mode |= (1 << 0);
                        else {
                                disable_dev_on_err = 0;
                                printk(KERN_WARNING "ata: 0x1f0 IDE port busy\n");
                        }
                } else
                        legacy_mode |= (1 << 0);

                if (!request_region(0x170, 8, "libata")) {
                        struct resource *conflict, res;
                        res.start = 0x170;
                        res.end = 0x170 + 8 - 1;
                        conflict = ____request_resource(&ioport_resource, &res);
                        if (!strcmp(conflict->name, "libata"))
                                legacy_mode |= (1 << 1);
                        else {
                                disable_dev_on_err = 0;
                                printk(KERN_WARNING "ata: 0x170 IDE port busy\n");
                        }
                } else
                        legacy_mode |= (1 << 1);
        }

        /* we have legacy mode, but all ports are unavailable */
        if (legacy_mode == (1 << 3)) {
                rc = -EBUSY;
                goto err_out_regions;
        }

        /* FIXME: If we get no DMA mask we should fall back to PIO */
        rc = pci_set_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                goto err_out_regions;
        rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK);
        if (rc)
                goto err_out_regions;

        if (legacy_mode) {
                if (legacy_mode & (1 << 0))
                        probe_ent = ata_pci_init_legacy_port(pdev, port[0], 0);
                if (legacy_mode & (1 << 1))
                        probe_ent2 = ata_pci_init_legacy_port(pdev, port[1], 1);
        } else {
                if (n_ports == 2)
                        probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY | ATA_PORT_SECONDARY);
                else
                        probe_ent = ata_pci_init_native_mode(pdev, port, ATA_PORT_PRIMARY);
        }
        if (!probe_ent && !probe_ent2) {
                rc = -ENOMEM;
                goto err_out_regions;
        }

        pci_set_master(pdev);

        /* FIXME: check ata_device_add return */
        if (legacy_mode) {
                struct device *dev = &pdev->dev;
                struct ata_host_set *host_set = NULL;

                if (legacy_mode & (1 << 0)) {
                        ata_device_add(probe_ent);
                        host_set = dev_get_drvdata(dev);
                }

                if (legacy_mode & (1 << 1)) {
                        ata_device_add(probe_ent2);
                        if (host_set) {
                                host_set->next = dev_get_drvdata(dev);
                                dev_set_drvdata(dev, host_set);
                        }
                }
        } else
                ata_device_add(probe_ent);

        kfree(probe_ent);
        kfree(probe_ent2);

        return 0;

err_out_regions:
        if (legacy_mode & (1 << 0))
                release_region(0x1f0, 8);
        if (legacy_mode & (1 << 1))
                release_region(0x170, 8);
        pci_release_regions(pdev);
err_out:
        if (disable_dev_on_err)
                pci_disable_device(pdev);
        return rc;
}
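
/*
 * Illustrative example (hypothetical driver code): a simple PCI IDE driver
 * can implement its probe routine almost entirely with this helper.  The
 * names foo_init_one() and foo_port_info are made up for the sketch.
 *
 *	static int foo_init_one(struct pci_dev *pdev,
 *				const struct pci_device_id *ent)
 *	{
 *		static struct ata_port_info *port_info[2] =
 *			{ &foo_port_info, &foo_port_info };
 *
 *		return ata_pci_init_one(pdev, port_info, 2);
 *	}
 */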

/**
 * ata_pci_clear_simplex - attempt to kick device out of simplex
 * @pdev: PCI device
 *
 * Some PCI ATA devices report simplex mode but in fact can be told to
 * enter non-simplex mode.  This implements the necessary logic to
 * perform the task on such devices.  Calling it on other devices will
 * have -undefined- behaviour.
 */

int ata_pci_clear_simplex(struct pci_dev *pdev)
{
        unsigned long bmdma = pci_resource_start(pdev, 4);
        u8 simplex;

        if (bmdma == 0)
                return -ENOENT;

        simplex = inb(bmdma + 0x02);
        outb(simplex & 0x60, bmdma + 0x02);
        simplex = inb(bmdma + 0x02);
        if (simplex & 0x80)
                return -EOPNOTSUPP;
        return 0;
}

unsigned long ata_pci_default_filter(const struct ata_port *ap, struct ata_device *adev, unsigned long xfer_mask)
{
        /* Filter out DMA modes if the device has been configured by
           the BIOS as PIO only */

        if (ap->ioaddr.bmdma_addr == 0)
                xfer_mask &= ~(ATA_MASK_MWDMA | ATA_MASK_UDMA);
        return xfer_mask;
}
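
/*
 * Illustrative example (hypothetical driver code): a driver would normally
 * reference this filter from its ata_port_operations so that DMA modes are
 * dropped when the BIOS left the channel without a BMDMA base address; the
 * field name below assumes the standard mode_filter hook.
 *
 *	.mode_filter	= ata_pci_default_filter,
 */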

#endif /* CONFIG_PCI */