1/*
2 * Support for IDE interfaces on Celleb platform
3 *
4 * (C) Copyright 2006 TOSHIBA CORPORATION
5 *
6 * This code is based on drivers/ide/pci/siimage.c:
7 * Copyright (C) 2001-2002 Andre Hedrick <andre@linux-ide.org>
8 * Copyright (C) 2003 Red Hat <alan@redhat.com>
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License as published by
12 * the Free Software Foundation; either version 2 of the License, or
13 * (at your option) any later version.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
18 * GNU General Public License for more details.
19 *
20 * You should have received a copy of the GNU General Public License along
21 * with this program; if not, write to the Free Software Foundation, Inc.,
22 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
23 */
24
25#include <linux/types.h>
26#include <linux/module.h>
27#include <linux/pci.h>
28#include <linux/delay.h>
29#include <linux/hdreg.h>
30#include <linux/ide.h>
31#include <linux/init.h>
32
33#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA 0x01b4
34
35#define SCC_PATA_NAME "scc IDE"
36
37#define TDVHSEL_MASTER 0x00000001
38#define TDVHSEL_SLAVE 0x00000004
39
40#define MODE_JCUSFEN 0x00000080
41
42#define CCKCTRL_ATARESET 0x00040000
43#define CCKCTRL_BUFCNT 0x00020000
44#define CCKCTRL_CRST 0x00010000
45#define CCKCTRL_OCLKEN 0x00000100
46#define CCKCTRL_ATACLKOEN 0x00000002
47#define CCKCTRL_LCLKEN 0x00000001
48
49#define QCHCD_IOS_SS 0x00000001
50
51#define QCHSD_STPDIAG 0x00020000
52
53#define INTMASK_MSK 0xD1000012
54#define INTSTS_SERROR 0x80000000
55#define INTSTS_PRERR 0x40000000
56#define INTSTS_RERR 0x10000000
57#define INTSTS_ICERR 0x01000000
58#define INTSTS_BMSINT 0x00000010
59#define INTSTS_BMHE 0x00000008
60#define INTSTS_IOIRQS 0x00000004
61#define INTSTS_INTRQ 0x00000002
62#define INTSTS_ACTEINT 0x00000001
63
64#define ECMODE_VALUE 0x01
65
66static struct scc_ports {
67 unsigned long ctl, dma;
 68 struct ide_host *host; /* for removing port from system */
69} scc_ports[MAX_HWIFS];
70
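/*
 * Each timing table below is indexed as [clock][mode]: the first index
 * selects the 100MHz (0) or 133MHz (1) values, chosen from
 * CCKCTRL_ATACLKOEN in scc_set_pio_mode()/scc_set_dma_mode(), and the
 * second index is the PIO or UDMA mode number.
 */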
71/* PIO transfer mode table */
72/* JCHST */
73static unsigned long JCHSTtbl[2][7] = {
74 {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00}, /* 100MHz */
75 {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00} /* 133MHz */
76};
77
78/* JCHHT */
79static unsigned long JCHHTtbl[2][7] = {
80 {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00}, /* 100MHz */
81 {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00} /* 133MHz */
82};
83
84/* JCHCT */
85static unsigned long JCHCTtbl[2][7] = {
86 {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00}, /* 100MHz */
87 {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00} /* 133MHz */
88};
89
90
91/* DMA transfer mode table */
92/* JCHDCTM/JCHDCTS */
93static unsigned long JCHDCTxtbl[2][7] = {
94 {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00}, /* 100MHz */
95 {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00} /* 133MHz */
96};
97
98/* JCSTWTM/JCSTWTS */
99static unsigned long JCSTWTxtbl[2][7] = {
100 {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00}, /* 100MHz */
101 {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
102};
103
104/* JCTSS */
105static unsigned long JCTSStbl[2][7] = {
106 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00}, /* 100MHz */
107 {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05} /* 133MHz */
108};
109
110/* JCENVT */
111static unsigned long JCENVTtbl[2][7] = {
112 {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00}, /* 100MHz */
113 {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} /* 133MHz */
114};
115
116/* JCACTSELS/JCACTSELM */
117static unsigned long JCACTSELtbl[2][7] = {
118 {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00}, /* 100MHz */
119 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01} /* 133MHz */
120};
121
122
123static u8 scc_ide_inb(unsigned long port)
124{
125 u32 data = in_be32((void*)port);
126 return (u8)data;
127}
128
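/*
 * Register writes below are followed by eieio() and a read-back from
 * (dma_base + 0x01c); the dummy read forces the posted MMIO write out
 * to the device before execution continues.
 */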
129static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
130{
131 out_be32((void *)hwif->io_ports.command_addr, cmd);
132 eieio();
133 in_be32((void *)(hwif->dma_base + 0x01c));
134 eieio();
135}
136
137static u8 scc_read_status(ide_hwif_t *hwif)
138{
139 return (u8)in_be32((void *)hwif->io_ports.status_addr);
140}
141
142static u8 scc_read_altstatus(ide_hwif_t *hwif)
143{
144 return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
145}
146
147static u8 scc_read_sff_dma_status(ide_hwif_t *hwif)
148{
 149 return (u8)in_be32((void *)(hwif->dma_base + 4));
150}
151
152static void scc_set_irq(ide_hwif_t *hwif, int on)
153{
154 u8 ctl = ATA_DEVCTL_OBS;
155
156 if (on == 4) { /* hack for SRST */
157 ctl |= 4;
158 on &= ~4;
159 }
160
161 ctl |= on ? 0 : 2;
162
163 out_be32((void *)hwif->io_ports.ctl_addr, ctl);
164 eieio();
165 in_be32((void *)(hwif->dma_base + 0x01c));
166 eieio();
167}
168
169static void scc_ide_insw(unsigned long port, void *addr, u32 count)
170{
171 u16 *ptr = (u16 *)addr;
172 while (count--) {
173 *ptr++ = le16_to_cpu(in_be32((void*)port));
174 }
175}
176
177static void scc_ide_insl(unsigned long port, void *addr, u32 count)
178{
179 u16 *ptr = (u16 *)addr;
180 while (count--) {
181 *ptr++ = le16_to_cpu(in_be32((void*)port));
182 *ptr++ = le16_to_cpu(in_be32((void*)port));
183 }
184}
185
186static void scc_ide_outb(u8 addr, unsigned long port)
187{
188 out_be32((void*)port, addr);
189}
190
191static void
192scc_ide_outsw(unsigned long port, void *addr, u32 count)
193{
194 u16 *ptr = (u16 *)addr;
195 while (count--) {
196 out_be32((void*)port, cpu_to_le16(*ptr++));
197 }
198}
199
200static void
201scc_ide_outsl(unsigned long port, void *addr, u32 count)
202{
203 u16 *ptr = (u16 *)addr;
204 while (count--) {
205 out_be32((void*)port, cpu_to_le16(*ptr++));
206 out_be32((void*)port, cpu_to_le16(*ptr++));
207 }
208}
209
 210/**
211 * scc_set_pio_mode - set host controller for PIO mode
212 * @drive: drive
213 * @pio: PIO mode number
214 *
215 * Load the timing settings for this device mode into the
216 * controller.
217 */
218
 219static void scc_set_pio_mode(ide_drive_t *drive, const u8 pio)
220{
221 ide_hwif_t *hwif = HWIF(drive);
222 struct scc_ports *ports = ide_get_hwifdata(hwif);
223 unsigned long ctl_base = ports->ctl;
224 unsigned long cckctrl_port = ctl_base + 0xff0;
225 unsigned long piosht_port = ctl_base + 0x000;
226 unsigned long pioct_port = ctl_base + 0x004;
227 unsigned long reg;
228 int offset;
229
 230 reg = in_be32((void __iomem *)cckctrl_port);
231 if (reg & CCKCTRL_ATACLKOEN) {
232 offset = 1; /* 133MHz */
233 } else {
234 offset = 0; /* 100MHz */
235 }
 236 reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
 237 out_be32((void __iomem *)piosht_port, reg);
 238 reg = JCHCTtbl[offset][pio];
 239 out_be32((void __iomem *)pioct_port, reg);
 240}
 241
 242/**
243 * scc_set_dma_mode - set host controller for DMA mode
244 * @drive: drive
245 * @speed: DMA mode
246 *
247 * Load the timing settings for this device mode into the
248 * controller.
249 */
250
 251static void scc_set_dma_mode(ide_drive_t *drive, const u8 speed)
252{
253 ide_hwif_t *hwif = HWIF(drive);
254 struct scc_ports *ports = ide_get_hwifdata(hwif);
255 unsigned long ctl_base = ports->ctl;
256 unsigned long cckctrl_port = ctl_base + 0xff0;
257 unsigned long mdmact_port = ctl_base + 0x008;
258 unsigned long mcrcst_port = ctl_base + 0x00c;
259 unsigned long sdmact_port = ctl_base + 0x010;
260 unsigned long scrcst_port = ctl_base + 0x014;
261 unsigned long udenvt_port = ctl_base + 0x018;
262 unsigned long tdvhsel_port = ctl_base + 0x020;
263 int is_slave = (&hwif->drives[1] == drive);
264 int offset, idx;
265 unsigned long reg;
266 unsigned long jcactsel;
267
 268 reg = in_be32((void __iomem *)cckctrl_port);
269 if (reg & CCKCTRL_ATACLKOEN) {
270 offset = 1; /* 133MHz */
271 } else {
272 offset = 0; /* 100MHz */
273 }
274
 275 idx = speed - XFER_UDMA_0;
276
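	/* JCACTSEL is written into TDVHSEL: bit 0 for the master device,
	 * or shifted left by 2 into bit 2 for the slave device. */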
277 jcactsel = JCACTSELtbl[offset][idx];
278 if (is_slave) {
279 out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
280 out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
281 jcactsel = jcactsel << 2;
282 out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
 283 } else {
284 out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
285 out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
286 out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
287 }
288 reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
 289 out_be32((void __iomem *)udenvt_port, reg);
290}
291
292static void scc_dma_host_set(ide_drive_t *drive, int on)
293{
294 ide_hwif_t *hwif = drive->hwif;
295 u8 unit = (drive->select.b.unit & 0x01);
 296 u8 dma_stat = scc_ide_inb(hwif->dma_base + 4);
297
298 if (on)
299 dma_stat |= (1 << (5 + unit));
300 else
301 dma_stat &= ~(1 << (5 + unit));
302
 303 scc_ide_outb(dma_stat, hwif->dma_base + 4);
304}
305
306/**
307 * scc_ide_dma_setup - begin a DMA phase
308 * @drive: target device
309 *
310 * Build an IDE DMA PRD (IDE speak for scatter gather table)
311 * and then set up the DMA transfer registers.
312 *
313 * Returns 0 on success. If a PIO fallback is required then 1
314 * is returned.
315 */
316
317static int scc_dma_setup(ide_drive_t *drive)
318{
319 ide_hwif_t *hwif = drive->hwif;
320 struct request *rq = HWGROUP(drive)->rq;
321 unsigned int reading;
322 u8 dma_stat;
323
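	/* bit 3 of the DMA command register selects the transfer direction:
	 * set means a device-to-memory (read) transfer. */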
324 if (rq_data_dir(rq))
325 reading = 0;
326 else
327 reading = 1 << 3;
328
329 /* fall back to pio! */
330 if (!ide_build_dmatable(drive, rq)) {
331 ide_map_sg(drive, rq);
332 return 1;
333 }
334
335 /* PRD table */
 336 out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
337
338 /* specify r/w */
 339 out_be32((void __iomem *)hwif->dma_base, reading);
 340
341 /* read DMA status for INTR & ERROR flags */
342 dma_stat = in_be32((void __iomem *)(hwif->dma_base + 4));
343
344 /* clear INTR & ERROR flags */
 345 out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
346 drive->waiting_for_dma = 1;
347 return 0;
348}
349
350static void scc_dma_start(ide_drive_t *drive)
351{
352 ide_hwif_t *hwif = drive->hwif;
 353 u8 dma_cmd = scc_ide_inb(hwif->dma_base);
354
355 /* start DMA */
 356 scc_ide_outb(dma_cmd | 1, hwif->dma_base);
357 hwif->dma = 1;
358 wmb();
359}
360
361static int __scc_dma_end(ide_drive_t *drive)
362{
363 ide_hwif_t *hwif = drive->hwif;
364 u8 dma_stat, dma_cmd;
365
366 drive->waiting_for_dma = 0;
367 /* get DMA command mode */
 368 dma_cmd = scc_ide_inb(hwif->dma_base);
 369 /* stop DMA */
 370 scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
 371 /* get DMA status */
 372 dma_stat = scc_ide_inb(hwif->dma_base + 4);
 373 /* clear the INTR & ERROR bits */
 374 scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
375 /* purge DMA mappings */
376 ide_destroy_dmatable(drive);
377 /* verify good DMA status */
378 hwif->dma = 0;
379 wmb();
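	/* only "interrupt" (0x4) by itself counts as a clean completion;
	 * anything else is returned as an error status. */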
380 return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
381}
 382
 383/**
 384 * scc_dma_end - Stop DMA
385 * @drive: IDE drive
386 *
387 * Check and clear INT Status register.
 388 * Then call __scc_dma_end().
389 */
390
 391static int scc_dma_end(ide_drive_t *drive)
392{
393 ide_hwif_t *hwif = HWIF(drive);
 394 void __iomem *dma_base = (void __iomem *)hwif->dma_base;
395 unsigned long intsts_port = hwif->dma_base + 0x014;
396 u32 reg;
397 int dma_stat, data_loss = 0;
398 static int retry = 0;
399
400 /* errata A308 workaround: Step5 (check data loss) */
 401 /* We don't check non-ide_disk devices because they are limited to UDMA4 */
 402 if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
 403 & ERR_STAT) &&
404 drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
405 reg = in_be32((void __iomem *)intsts_port);
406 if (!(reg & INTSTS_ACTEINT)) {
407 printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
408 drive->name);
409 data_loss = 1;
410 if (retry++) {
411 struct request *rq = HWGROUP(drive)->rq;
412 int unit;
413 /* ERROR_RESET and drive->crc_count are needed
414 * to reduce DMA transfer mode in retry process.
415 */
416 if (rq)
417 rq->errors |= ERROR_RESET;
418 for (unit = 0; unit < MAX_DRIVES; unit++) {
419 ide_drive_t *drive = &hwif->drives[unit];
420 drive->crc_count++;
421 }
422 }
423 }
424 }
425
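	/* acknowledge and clear every pending SCC interrupt status bit,
	 * logging the error cases, before stopping the DMA engine. */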
426 while (1) {
 427 reg = in_be32((void __iomem *)intsts_port);
428
429 if (reg & INTSTS_SERROR) {
430 printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
 431 out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
 432
 433 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
434 continue;
435 }
436
437 if (reg & INTSTS_PRERR) {
438 u32 maea0, maec0;
439 unsigned long ctl_base = hwif->config_data;
440
441 maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
442 maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));
443
444 printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);
445
 446 out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
 447
 448 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
449 continue;
450 }
451
452 if (reg & INTSTS_RERR) {
453 printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
 454 out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
 455
 456 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
457 continue;
458 }
459
460 if (reg & INTSTS_ICERR) {
 461 out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
462
463 printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
 464 out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
465 continue;
466 }
467
468 if (reg & INTSTS_BMSINT) {
469 printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
 470 out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);
471
472 ide_do_reset(drive);
473 continue;
474 }
475
476 if (reg & INTSTS_BMHE) {
 477 out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
478 continue;
479 }
480
481 if (reg & INTSTS_ACTEINT) {
 482 out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
483 continue;
484 }
485
486 if (reg & INTSTS_IOIRQS) {
 487 out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
488 continue;
489 }
490 break;
491 }
492
 493 dma_stat = __scc_dma_end(drive);
494 if (data_loss)
495 dma_stat |= 2; /* emulate DMA error (to retry command) */
496 return dma_stat;
497}
498
499/* returns 1 if dma irq issued, 0 otherwise */
500static int scc_dma_test_irq(ide_drive_t *drive)
501{
502 ide_hwif_t *hwif = HWIF(drive);
503 u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
 504
 505 /* SCC errata A252,A308 workaround: Step4 */
 506 if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
 507 & ERR_STAT) &&
 508 (int_stat & INTSTS_INTRQ))
509 return 1;
510
511 /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
512 if (int_stat & INTSTS_IOIRQS)
513 return 1;
514
515 if (!drive->waiting_for_dma)
516 printk(KERN_WARNING "%s: (%s) called while not waiting\n",
 517 drive->name, __func__);
518 return 0;
519}
520
521static u8 scc_udma_filter(ide_drive_t *drive)
522{
523 ide_hwif_t *hwif = drive->hwif;
524 u8 mask = hwif->ultra_mask;
525
526 /* errata A308 workaround: limit non ide_disk drive to UDMA4 */
527 if ((drive->media != ide_disk) && (mask & 0xE0)) {
528 printk(KERN_INFO "%s: limit %s to UDMA4\n",
529 SCC_PATA_NAME, drive->name);
 530 mask = ATA_UDMA4;
531 }
532
533 return mask;
534}
535
536/**
537 * setup_mmio_scc - map CTRL/BMID region
538 * @dev: PCI device we are configuring
539 * @name: device name
540 *
541 */
542
543static int setup_mmio_scc (struct pci_dev *dev, const char *name)
544{
545 unsigned long ctl_base = pci_resource_start(dev, 0);
546 unsigned long dma_base = pci_resource_start(dev, 1);
547 unsigned long ctl_size = pci_resource_len(dev, 0);
548 unsigned long dma_size = pci_resource_len(dev, 1);
549 void __iomem *ctl_addr;
550 void __iomem *dma_addr;
 551 int i, ret;
552
553 for (i = 0; i < MAX_HWIFS; i++) {
554 if (scc_ports[i].ctl == 0)
555 break;
556 }
557 if (i >= MAX_HWIFS)
558 return -ENOMEM;
559
560 ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
561 if (ret < 0) {
562 printk(KERN_ERR "%s: can't reserve resources\n", name);
563 return ret;
564 }
565
566 if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
 567 goto fail_0;
568
569 if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
 570 goto fail_1;
571
572 pci_set_master(dev);
573 scc_ports[i].ctl = (unsigned long)ctl_addr;
574 scc_ports[i].dma = (unsigned long)dma_addr;
575 pci_set_drvdata(dev, (void *) &scc_ports[i]);
576
577 return 1;
578
 579 fail_1:
 580 iounmap(ctl_addr);
581 fail_0:
582 return -ENOMEM;
583}
584
585static int scc_ide_setup_pci_device(struct pci_dev *dev,
586 const struct ide_port_info *d)
587{
588 struct scc_ports *ports = pci_get_drvdata(dev);
 589 struct ide_host *host;
 590 hw_regs_t hw, *hws[] = { &hw, NULL, NULL, NULL };
591 int i;
592
 593 memset(&hw, 0, sizeof(hw));
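	/* the ATA task file registers are mapped at dma_base + 0x20,
	 * one 32-bit word per register. */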
594 for (i = 0; i <= 8; i++)
595 hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
596 hw.irq = dev->irq;
597 hw.dev = &dev->dev;
598 hw.chipset = ide_pci;
 599
600 host = ide_host_alloc(d, hws);
601 if (host == NULL)
602 return -ENOMEM;
 603
604 ide_host_register(host, d, hws);
605
606 ports->host = host;
607
608 return 0;
609}
610
611/**
612 * init_setup_scc - set up an SCC PATA Controller
613 * @dev: PCI device
 614 * @d: IDE port info
615 *
616 * Perform the initial set up for this device.
617 */
618
 619static int __devinit init_setup_scc(struct pci_dev *dev,
 620 const struct ide_port_info *d)
621{
622 unsigned long ctl_base;
623 unsigned long dma_base;
624 unsigned long cckctrl_port;
625 unsigned long intmask_port;
626 unsigned long mode_port;
627 unsigned long ecmode_port;
628 unsigned long dma_status_port;
629 u32 reg = 0;
630 struct scc_ports *ports;
631 int rc;
632
633 rc = pci_enable_device(dev);
634 if (rc)
635 goto end;
636
 637 rc = setup_mmio_scc(dev, d->name);
638 if (rc < 0)
639 goto end;
640
641 ports = pci_get_drvdata(dev);
642 ctl_base = ports->ctl;
643 dma_base = ports->dma;
644 cckctrl_port = ctl_base + 0xff0;
645 intmask_port = dma_base + 0x010;
646 mode_port = ctl_base + 0x024;
647 ecmode_port = ctl_base + 0xf00;
648 dma_status_port = dma_base + 0x004;
649
650 /* controller initialization */
651 reg = 0;
652 out_be32((void*)cckctrl_port, reg);
653 reg |= CCKCTRL_ATACLKOEN;
654 out_be32((void*)cckctrl_port, reg);
655 reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
656 out_be32((void*)cckctrl_port, reg);
657 reg |= CCKCTRL_CRST;
658 out_be32((void*)cckctrl_port, reg);
659
660 for (;;) {
661 reg = in_be32((void*)cckctrl_port);
662 if (reg & CCKCTRL_CRST)
663 break;
664 udelay(5000);
665 }
666
667 reg |= CCKCTRL_ATARESET;
668 out_be32((void*)cckctrl_port, reg);
669
670 out_be32((void*)ecmode_port, ECMODE_VALUE);
671 out_be32((void*)mode_port, MODE_JCUSFEN);
672 out_be32((void*)intmask_port, INTMASK_MSK);
673
674 rc = scc_ide_setup_pci_device(dev, d);
675
676 end:
677 return rc;
678}
679
680static void scc_tf_load(ide_drive_t *drive, ide_task_t *task)
681{
682 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
683 struct ide_taskfile *tf = &task->tf;
684 u8 HIHI = (task->tf_flags & IDE_TFLAG_LBA48) ? 0xE0 : 0xEF;
685
686 if (task->tf_flags & IDE_TFLAG_FLAGGED)
687 HIHI = 0xFF;
688
 689 if (task->tf_flags & IDE_TFLAG_OUT_DATA)
690 out_be32((void *)io_ports->data_addr,
691 (tf->hob_data << 8) | tf->data);
692
693 if (task->tf_flags & IDE_TFLAG_OUT_HOB_FEATURE)
694 scc_ide_outb(tf->hob_feature, io_ports->feature_addr);
695 if (task->tf_flags & IDE_TFLAG_OUT_HOB_NSECT)
696 scc_ide_outb(tf->hob_nsect, io_ports->nsect_addr);
697 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAL)
698 scc_ide_outb(tf->hob_lbal, io_ports->lbal_addr);
699 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAM)
700 scc_ide_outb(tf->hob_lbam, io_ports->lbam_addr);
701 if (task->tf_flags & IDE_TFLAG_OUT_HOB_LBAH)
702 scc_ide_outb(tf->hob_lbah, io_ports->lbah_addr);
703
704 if (task->tf_flags & IDE_TFLAG_OUT_FEATURE)
705 scc_ide_outb(tf->feature, io_ports->feature_addr);
706 if (task->tf_flags & IDE_TFLAG_OUT_NSECT)
707 scc_ide_outb(tf->nsect, io_ports->nsect_addr);
708 if (task->tf_flags & IDE_TFLAG_OUT_LBAL)
709 scc_ide_outb(tf->lbal, io_ports->lbal_addr);
710 if (task->tf_flags & IDE_TFLAG_OUT_LBAM)
711 scc_ide_outb(tf->lbam, io_ports->lbam_addr);
712 if (task->tf_flags & IDE_TFLAG_OUT_LBAH)
713 scc_ide_outb(tf->lbah, io_ports->lbah_addr);
714
715 if (task->tf_flags & IDE_TFLAG_OUT_DEVICE)
716 scc_ide_outb((tf->device & HIHI) | drive->select.all,
717 io_ports->device_addr);
718}
719
720static void scc_tf_read(ide_drive_t *drive, ide_task_t *task)
721{
722 struct ide_io_ports *io_ports = &drive->hwif->io_ports;
723 struct ide_taskfile *tf = &task->tf;
724
725 if (task->tf_flags & IDE_TFLAG_IN_DATA) {
 726 u16 data = (u16)in_be32((void *)io_ports->data_addr);
727
728 tf->data = data & 0xff;
729 tf->hob_data = (data >> 8) & 0xff;
730 }
731
732 /* be sure we're looking at the low order bits */
 733 scc_ide_outb(ATA_DEVCTL_OBS & ~0x80, io_ports->ctl_addr);
 734
735 if (task->tf_flags & IDE_TFLAG_IN_FEATURE)
736 tf->feature = scc_ide_inb(io_ports->feature_addr);
737 if (task->tf_flags & IDE_TFLAG_IN_NSECT)
738 tf->nsect = scc_ide_inb(io_ports->nsect_addr);
739 if (task->tf_flags & IDE_TFLAG_IN_LBAL)
740 tf->lbal = scc_ide_inb(io_ports->lbal_addr);
741 if (task->tf_flags & IDE_TFLAG_IN_LBAM)
742 tf->lbam = scc_ide_inb(io_ports->lbam_addr);
743 if (task->tf_flags & IDE_TFLAG_IN_LBAH)
744 tf->lbah = scc_ide_inb(io_ports->lbah_addr);
745 if (task->tf_flags & IDE_TFLAG_IN_DEVICE)
746 tf->device = scc_ide_inb(io_ports->device_addr);
747
748 if (task->tf_flags & IDE_TFLAG_LBA48) {
 749 scc_ide_outb(ATA_DEVCTL_OBS | 0x80, io_ports->ctl_addr);
750
751 if (task->tf_flags & IDE_TFLAG_IN_HOB_FEATURE)
752 tf->hob_feature = scc_ide_inb(io_ports->feature_addr);
753 if (task->tf_flags & IDE_TFLAG_IN_HOB_NSECT)
754 tf->hob_nsect = scc_ide_inb(io_ports->nsect_addr);
755 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAL)
756 tf->hob_lbal = scc_ide_inb(io_ports->lbal_addr);
757 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAM)
758 tf->hob_lbam = scc_ide_inb(io_ports->lbam_addr);
759 if (task->tf_flags & IDE_TFLAG_IN_HOB_LBAH)
760 tf->hob_lbah = scc_ide_inb(io_ports->lbah_addr);
761 }
762}
763
764static void scc_input_data(ide_drive_t *drive, struct request *rq,
765 void *buf, unsigned int len)
766{
767 unsigned long data_addr = drive->hwif->io_ports.data_addr;
768
769 len++;
770
771 if (drive->io_32bit) {
772 scc_ide_insl(data_addr, buf, len / 4);
773
774 if ((len & 3) >= 2)
775 scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
776 } else
777 scc_ide_insw(data_addr, buf, len / 2);
778}
779
780static void scc_output_data(ide_drive_t *drive, struct request *rq,
781 void *buf, unsigned int len)
782{
783 unsigned long data_addr = drive->hwif->io_ports.data_addr;
784
785 len++;
786
787 if (drive->io_32bit) {
788 scc_ide_outsl(data_addr, buf, len / 4);
789
790 if ((len & 3) >= 2)
791 scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
792 } else
793 scc_ide_outsw(data_addr, buf, len / 2);
794}
795
796/**
797 * init_mmio_iops_scc - set up the iops for MMIO
798 * @hwif: interface to set up
799 *
800 */
801
802static void __devinit init_mmio_iops_scc(ide_hwif_t *hwif)
803{
 804 struct pci_dev *dev = to_pci_dev(hwif->dev);
805 struct scc_ports *ports = pci_get_drvdata(dev);
806 unsigned long dma_base = ports->dma;
807
808 ide_set_hwifdata(hwif, ports);
809
810 hwif->dma_base = dma_base;
811 hwif->config_data = ports->ctl;
812}
813
814/**
815 * init_iops_scc - set up iops
816 * @hwif: interface to set up
817 *
818 * Do the basic setup for the SCC hardware interface
819 * and then do the MMIO setup.
820 */
821
822static void __devinit init_iops_scc(ide_hwif_t *hwif)
823{
824 struct pci_dev *dev = to_pci_dev(hwif->dev);
825
826 hwif->hwif_data = NULL;
827 if (pci_get_drvdata(dev) == NULL)
828 return;
829 init_mmio_iops_scc(hwif);
830}
831
832static u8 __devinit scc_cable_detect(ide_hwif_t *hwif)
833{
834 return ATA_CBL_PATA80;
835}
836
837/**
838 * init_hwif_scc - set up hwif
839 * @hwif: interface to set up
840 *
841 * We do the basic set up of the interface structure. The SCC
842 * requires several custom handlers so we override the default
843 * ide DMA handlers appropriately.
844 */
845
846static void __devinit init_hwif_scc(ide_hwif_t *hwif)
847{
848 struct scc_ports *ports = ide_get_hwifdata(hwif);
849
850 /* PTERADD */
851 out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
 852
853 if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
854 hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
855 else
856 hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
857}
858
859static const struct ide_tp_ops scc_tp_ops = {
860 .exec_command = scc_exec_command,
861 .read_status = scc_read_status,
862 .read_altstatus = scc_read_altstatus,
863 .read_sff_dma_status = scc_read_sff_dma_status,
864
865 .set_irq = scc_set_irq,
866
867 .tf_load = scc_tf_load,
868 .tf_read = scc_tf_read,
869
870 .input_data = scc_input_data,
871 .output_data = scc_output_data,
872};
873
874static const struct ide_port_ops scc_port_ops = {
875 .set_pio_mode = scc_set_pio_mode,
876 .set_dma_mode = scc_set_dma_mode,
877 .udma_filter = scc_udma_filter,
878 .cable_detect = scc_cable_detect,
879};
880
 881static const struct ide_dma_ops scc_dma_ops = {
 882 .dma_host_set = scc_dma_host_set,
 883 .dma_setup = scc_dma_setup,
 884 .dma_exec_cmd = ide_dma_exec_cmd,
 885 .dma_start = scc_dma_start,
886 .dma_end = scc_dma_end,
887 .dma_test_irq = scc_dma_test_irq,
888 .dma_lost_irq = ide_dma_lost_irq,
889 .dma_timeout = ide_dma_timeout,
890};
891
892#define DECLARE_SCC_DEV(name_str) \
893 { \
894 .name = name_str, \
895 .init_iops = init_iops_scc, \
896 .init_hwif = init_hwif_scc, \
 897 .tp_ops = &scc_tp_ops, \
 898 .port_ops = &scc_port_ops, \
 899 .dma_ops = &scc_dma_ops, \
 900 .host_flags = IDE_HFLAG_SINGLE, \
 901 .pio_mask = ATA_PIO4, \
902 }
903
 904static const struct ide_port_info scc_chipsets[] __devinitdata = {
905 /* 0 */ DECLARE_SCC_DEV("sccIDE"),
906};
907
908/**
909 * scc_init_one - pci layer discovery entry
910 * @dev: PCI device
911 * @id: ident table entry
912 *
913 * Called by the PCI code when it finds an SCC PATA controller.
914 * We then use the IDE PCI generic helper to do most of the work.
915 */
916
917static int __devinit scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
918{
 919 return init_setup_scc(dev, &scc_chipsets[id->driver_data]);
920}
921
922/**
923 * scc_remove - pci layer remove entry
924 * @dev: PCI device
925 *
926 * Called by the PCI code when it removes an SCC PATA controller.
927 */
928
929static void __devexit scc_remove(struct pci_dev *dev)
930{
931 struct scc_ports *ports = pci_get_drvdata(dev);
932 struct ide_host *host = ports->host;
933 ide_hwif_t *hwif = host->ports[0];
934
935 if (hwif->dmatable_cpu) {
936 pci_free_consistent(dev, PRD_ENTRIES * PRD_BYTES,
937 hwif->dmatable_cpu, hwif->dmatable_dma);
938 hwif->dmatable_cpu = NULL;
939 }
940
 941 ide_host_remove(host);
 942
943 iounmap((void*)ports->dma);
944 iounmap((void*)ports->ctl);
 945 pci_release_selected_regions(dev, (1 << 2) - 1);
946 memset(ports, 0, sizeof(*ports));
947}
948
949static const struct pci_device_id scc_pci_tbl[] = {
950 { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
951 { 0, },
952};
953MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
954
955static struct pci_driver driver = {
956 .name = "SCC IDE",
957 .id_table = scc_pci_tbl,
958 .probe = scc_init_one,
959 .remove = scc_remove,
960};
961
962static int scc_ide_init(void)
963{
964 return ide_pci_register_driver(&driver);
965}
966
967module_init(scc_ide_init);
968/* -- No exit code?
969static void scc_ide_exit(void)
970{
971 ide_pci_unregister_driver(&driver);
972}
973module_exit(scc_ide_exit);
974 */
975
976
977MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
978MODULE_LICENSE("GPL");