/* sun_esp.c: ESP front-end for Sparc SBUS systems.
 *
 * Copyright (C) 2007, 2008 David S. Miller (davem@davemloft.net)
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>

#include <asm/irq.h>
#include <asm/io.h>
#include <asm/dma.h>

#include <asm/sbus.h>

#include <scsi/scsi_host.h>

#include "esp_scsi.h"

#define DRV_MODULE_NAME		"sun_esp"
#define PFX DRV_MODULE_NAME	": "
#define DRV_VERSION		"1.000"
#define DRV_MODULE_RELDATE	"April 19, 2007"

#define dma_read32(REG) \
	sbus_readl(esp->dma_regs + (REG))
#define dma_write32(VAL, REG) \
	sbus_writel((VAL), esp->dma_regs + (REG))

/* DVMA chip revisions */
enum dvma_rev {
	dvmarev0,
	dvmaesc1,
	dvmarev1,
	dvmarev2,
	dvmarev3,
	dvmarevplus,
	dvmahme
};

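/* Map the companion DVMA controller's registers and work out which
 * DVMA revision we are dealing with from the DMA_CSR device ID bits.
 */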
static int __devinit esp_sbus_setup_dma(struct esp *esp,
					struct of_device *dma_of)
{
	esp->dma = dma_of;

	esp->dma_regs = of_ioremap(&dma_of->resource[0], 0,
				   resource_size(&dma_of->resource[0]),
				   "espdma");
	if (!esp->dma_regs)
		return -ENOMEM;

	switch (dma_read32(DMA_CSR) & DMA_DEVICE_ID) {
	case DMA_VERS0:
		esp->dmarev = dvmarev0;
		break;
	case DMA_ESCV1:
		esp->dmarev = dvmaesc1;
		break;
	case DMA_VERS1:
		esp->dmarev = dvmarev1;
		break;
	case DMA_VERS2:
		esp->dmarev = dvmarev2;
		break;
	case DMA_VERHME:
		esp->dmarev = dvmahme;
		break;
	case DMA_VERSPLUS:
		esp->dmarev = dvmarevplus;
		break;
	}

	return 0;
}

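/* Map the ESP chip registers for this SBUS device. */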
static int __devinit esp_sbus_map_regs(struct esp *esp, int hme)
{
	struct sbus_dev *sdev = esp->dev;
	struct resource *res;

	/* On HME, two reg sets exist, first is DVMA,
	 * second is ESP registers.
	 */
	if (hme)
		res = &sdev->resource[1];
	else
		res = &sdev->resource[0];

	esp->regs = sbus_ioremap(res, 0, SBUS_ESP_REG_SIZE, "ESP");
	if (!esp->regs)
		return -ENOMEM;

	return 0;
}

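/* Allocate the small coherent buffer the esp_scsi core uses as its
 * command block.
 */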
static int __devinit esp_sbus_map_command_block(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;

	esp->command_block = dma_alloc_coherent(&sdev->ofdev.dev, 16,
						&esp->command_block_dma,
						GFP_ATOMIC);
	if (!esp->command_block)
		return -ENOMEM;
	return 0;
}

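/* Hook the first SBUS interrupt up to the shared esp_scsi interrupt handler. */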
static int __devinit esp_sbus_register_irq(struct esp *esp)
{
	struct Scsi_Host *host = esp->host;
	struct sbus_dev *sdev = esp->dev;

	host->irq = sdev->irqs[0];
	return request_irq(host->irq, scsi_esp_intr, IRQF_SHARED, "ESP", esp);
}

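/* Determine the host adapter's SCSI ID from the "initiator-id" or
 * "scsi-initiator-id" properties, trying the device node first, then
 * the parent SBUS node, and finally defaulting to ID 7.
 */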
static void __devinit esp_get_scsi_id(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;

	esp->scsi_id = of_getintprop_default(dp, "initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	esp->scsi_id = of_getintprop_default(dp, "scsi-initiator-id", 0xff);
	if (esp->scsi_id != 0xff)
		goto done;

	if (!sdev->bus) {
		/* SUN4 */
		esp->scsi_id = 7;
		goto done;
	}

	esp->scsi_id = of_getintprop_default(sdev->bus->ofdev.node,
					     "scsi-initiator-id", 7);

done:
	esp->host->this_id = esp->scsi_id;
	esp->scsi_id_mask = (1 << esp->scsi_id);
}

static void __devinit esp_get_differential(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;

	if (of_find_property(dp, "differential", NULL))
		esp->flags |= ESP_FLAG_DIFFERENTIAL;
	else
		esp->flags &= ~ESP_FLAG_DIFFERENTIAL;
}

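/* Read the chip clock frequency from the device node, falling back to
 * the parent SBUS node's "clock-frequency" property.
 */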
static void __devinit esp_get_clock_params(struct esp *esp)
{
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp = sdev->ofdev.node;
	struct device_node *bus_dp;
	int fmhz;

	bus_dp = NULL;
	if (sdev != NULL && sdev->bus != NULL)
		bus_dp = sdev->bus->ofdev.node;

	fmhz = of_getintprop_default(dp, "clock-frequency", 0);
	if (fmhz == 0)
		fmhz = (!bus_dp) ? 0 :
			of_getintprop_default(bus_dp, "clock-frequency", 0);

	esp->cfreq = fmhz;
}

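/* Intersect the "burst-sizes" masks advertised by the ESP node, the
 * companion DVMA node and the parent SBUS; fall back to DMA_BURST32 - 1
 * if the result does not allow both 16- and 32-byte bursts.
 */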
static void __devinit esp_get_bursts(struct esp *esp, struct of_device *dma_of)
{
	struct device_node *dma_dp = dma_of->node;
	struct sbus_dev *sdev = esp->dev;
	struct device_node *dp;
	u8 bursts, val;

	dp = sdev->ofdev.node;
	bursts = of_getintprop_default(dp, "burst-sizes", 0xff);
	val = of_getintprop_default(dma_dp, "burst-sizes", 0xff);
	if (val != 0xff)
		bursts &= val;

	if (sdev->bus) {
		u8 val = of_getintprop_default(sdev->bus->ofdev.node,
					       "burst-sizes", 0xff);
		if (val != 0xff)
			bursts &= val;
	}

	if (bursts == 0xff ||
	    (bursts & DMA_BURST16) == 0 ||
	    (bursts & DMA_BURST32) == 0)
		bursts = (DMA_BURST32 - 1);

	esp->bursts = bursts;
}

static void __devinit esp_sbus_get_props(struct esp *esp, struct of_device *espdma)
{
	esp_get_scsi_id(esp);
	esp_get_differential(esp);
	esp_get_clock_params(esp);
	esp_get_bursts(esp, espdma);
}

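/* The ESP registers are spaced four bytes apart on SBUS, hence the
 * reg * 4UL scaling in these accessors.
 */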
static void sbus_esp_write8(struct esp *esp, u8 val, unsigned long reg)
{
	sbus_writeb(val, esp->regs + (reg * 4UL));
}

static u8 sbus_esp_read8(struct esp *esp, unsigned long reg)
{
	return sbus_readb(esp->regs + (reg * 4UL));
}

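/* DMA mapping hooks for the esp_scsi core; thin wrappers around the
 * generic dma_*() API on the underlying OF device.
 */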
static dma_addr_t sbus_esp_map_single(struct esp *esp, void *buf,
				      size_t sz, int dir)
{
	struct sbus_dev *sdev = esp->dev;

	return dma_map_single(&sdev->ofdev.dev, buf, sz, dir);
}

static int sbus_esp_map_sg(struct esp *esp, struct scatterlist *sg,
			   int num_sg, int dir)
{
	struct sbus_dev *sdev = esp->dev;

	return dma_map_sg(&sdev->ofdev.dev, sg, num_sg, dir);
}

static void sbus_esp_unmap_single(struct esp *esp, dma_addr_t addr,
				  size_t sz, int dir)
{
	struct sbus_dev *sdev = esp->dev;

	dma_unmap_single(&sdev->ofdev.dev, addr, sz, dir);
}

static void sbus_esp_unmap_sg(struct esp *esp, struct scatterlist *sg,
			      int num_sg, int dir)
{
	struct sbus_dev *sdev = esp->dev;

	dma_unmap_sg(&sdev->ofdev.dev, sg, num_sg, dir);
}

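/* Report whether the DVMA has latched a SCSI or error interrupt. */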
static int sbus_esp_irq_pending(struct esp *esp)
{
	if (dma_read32(DMA_CSR) & (DMA_HNDL_INTR | DMA_HNDL_ERROR))
		return 1;
	return 0;
}

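/* Reset the DVMA engine and program it for the burst sizes (and, on
 * HME, 64-bit SBUS mode) negotiated in esp_get_bursts(), then re-enable
 * its interrupts.
 */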
static void sbus_esp_reset_dma(struct esp *esp)
{
	int can_do_burst16, can_do_burst32, can_do_burst64;
	int can_do_sbus64, lim;
	u32 val;

	can_do_burst16 = (esp->bursts & DMA_BURST16) != 0;
	can_do_burst32 = (esp->bursts & DMA_BURST32) != 0;
	can_do_burst64 = 0;
	can_do_sbus64 = 0;
	if (sbus_can_dma_64bit(esp->dev))
		can_do_sbus64 = 1;
	if (sbus_can_burst64(esp->dev))
		can_do_burst64 = (esp->bursts & DMA_BURST64) != 0;

	/* Put the DVMA into a known state. */
	if (esp->dmarev != dvmahme) {
		val = dma_read32(DMA_CSR);
		dma_write32(val | DMA_RST_SCSI, DMA_CSR);
		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}
	switch (esp->dmarev) {
	case dvmahme:
		dma_write32(DMA_RESET_FAS366, DMA_CSR);
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = (DMA_PARITY_OFF | DMA_2CLKS |
					DMA_SCSI_DISAB | DMA_INT_ENAB);

		esp->prev_hme_dmacsr &= ~(DMA_ENABLE | DMA_ST_WRITE |
					  DMA_BRST_SZ);

		if (can_do_burst64)
			esp->prev_hme_dmacsr |= DMA_BRST64;
		else if (can_do_burst32)
			esp->prev_hme_dmacsr |= DMA_BRST32;

		if (can_do_sbus64) {
			esp->prev_hme_dmacsr |= DMA_SCSI_SBUS64;
			sbus_set_sbus64(esp->dev, esp->bursts);
		}

		lim = 1000;
		while (dma_read32(DMA_CSR) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA_PEND_READ "
				       "will not clear!\n",
				       esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		dma_write32(0, DMA_ADDR);
		break;

	case dvmarev2:
		if (esp->rev != ESP100) {
			val = dma_read32(DMA_CSR);
			dma_write32(val | DMA_3CLKS, DMA_CSR);
		}
		break;

	case dvmarev3:
		val = dma_read32(DMA_CSR);
		val &= ~DMA_3CLKS;
		val |= DMA_2CLKS;
		if (can_do_burst32) {
			val &= ~DMA_BRST_SZ;
			val |= DMA_BRST32;
		}
		dma_write32(val, DMA_CSR);
		break;

	case dvmaesc1:
		val = dma_read32(DMA_CSR);
		val |= DMA_ADD_ENABLE;
		val &= ~DMA_BCNT_ENAB;
		if (!can_do_burst32 && can_do_burst16) {
			val |= DMA_ESC_BURST;
		} else {
			val &= ~(DMA_ESC_BURST);
		}
		dma_write32(val, DMA_CSR);
		break;

	default:
		break;
	}

	/* Enable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val | DMA_INT_ENAB, DMA_CSR);
}

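/* Wait for the DVMA to flush any buffered data out to memory.  HME
 * drains in hardware; rev3 and ESC1 drain on their own, other revisions
 * must be told to start via DMA_FIFO_STDRAIN.
 */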
static void sbus_esp_dma_drain(struct esp *esp)
{
	u32 csr;
	int lim;

	if (esp->dmarev == dvmahme)
		return;

	csr = dma_read32(DMA_CSR);
	if (!(csr & DMA_FIFO_ISDRAIN))
		return;

	if (esp->dmarev != dvmarev3 && esp->dmarev != dvmaesc1)
		dma_write32(csr | DMA_FIFO_STDRAIN, DMA_CSR);

	lim = 1000;
	while (dma_read32(DMA_CSR) & DMA_FIFO_ISDRAIN) {
		if (--lim == 0) {
			printk(KERN_ALERT PFX "esp%d: DMA will not drain!\n",
			       esp->host->unique_id);
			break;
		}
		udelay(1);
	}
}

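/* Throw away any state the DVMA is holding between commands.  On HME
 * this is a SCSI-side reset plus a reprogram of the cached CSR; on the
 * other revisions we wait out pending reads and pulse DMA_FIFO_INV.
 */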
static void sbus_esp_dma_invalidate(struct esp *esp)
{
	if (esp->dmarev == dvmahme) {
		dma_write32(DMA_RST_SCSI, DMA_CSR);

		esp->prev_hme_dmacsr = ((esp->prev_hme_dmacsr |
					 (DMA_PARITY_OFF | DMA_2CLKS |
					  DMA_SCSI_DISAB | DMA_INT_ENAB)) &
					~(DMA_ST_WRITE | DMA_ENABLE));

		dma_write32(0, DMA_CSR);
		dma_write32(esp->prev_hme_dmacsr, DMA_CSR);

		/* This is necessary to avoid having the SCSI channel
		 * engine lock up on us.
		 */
		dma_write32(0, DMA_ADDR);
	} else {
		u32 val;
		int lim;

		lim = 1000;
		while ((val = dma_read32(DMA_CSR)) & DMA_PEND_READ) {
			if (--lim == 0) {
				printk(KERN_ALERT PFX "esp%d: DMA will not "
				       "invalidate!\n", esp->host->unique_id);
				break;
			}
			udelay(1);
		}

		val &= ~(DMA_ENABLE | DMA_ST_WRITE | DMA_BCNT_ENAB);
		val |= DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
		val &= ~DMA_FIFO_INV;
		dma_write32(val, DMA_CSR);
	}
}

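/* Load the transfer count into the ESP, program the DVMA address,
 * count and direction, and kick off the DMA-capable ESP command.  On
 * ESC1 the byte count written to the DVMA is rounded up to a page
 * boundary.
 */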
static void sbus_esp_send_dma_cmd(struct esp *esp, u32 addr, u32 esp_count,
				  u32 dma_count, int write, u8 cmd)
{
	u32 csr;

	BUG_ON(!(cmd & ESP_CMD_DMA));

	sbus_esp_write8(esp, (esp_count >> 0) & 0xff, ESP_TCLOW);
	sbus_esp_write8(esp, (esp_count >> 8) & 0xff, ESP_TCMED);
	if (esp->rev == FASHME) {
		sbus_esp_write8(esp, (esp_count >> 16) & 0xff, FAS_RLO);
		sbus_esp_write8(esp, 0, FAS_RHI);

		scsi_esp_cmd(esp, cmd);

		csr = esp->prev_hme_dmacsr;
		csr |= DMA_SCSI_DISAB | DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		esp->prev_hme_dmacsr = csr;

		dma_write32(dma_count, DMA_COUNT);
		dma_write32(addr, DMA_ADDR);
		dma_write32(csr, DMA_CSR);
	} else {
		csr = dma_read32(DMA_CSR);
		csr |= DMA_ENABLE;
		if (write)
			csr |= DMA_ST_WRITE;
		else
			csr &= ~DMA_ST_WRITE;
		dma_write32(csr, DMA_CSR);
		if (esp->dmarev == dvmaesc1) {
			u32 end = PAGE_ALIGN(addr + dma_count + 16U);
			dma_write32(end - addr, DMA_COUNT);
		}
		dma_write32(addr, DMA_ADDR);

		scsi_esp_cmd(esp, cmd);
	}
}

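/* Tell the core whether the DVMA flagged an error on the last transfer. */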
static int sbus_esp_dma_error(struct esp *esp)
{
	u32 csr = dma_read32(DMA_CSR);

	if (csr & DMA_HNDL_ERROR)
		return 1;

	return 0;
}

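/* Driver ops vector handed to the shared esp_scsi core. */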
static const struct esp_driver_ops sbus_esp_ops = {
	.esp_write8	= sbus_esp_write8,
	.esp_read8	= sbus_esp_read8,
	.map_single	= sbus_esp_map_single,
	.map_sg		= sbus_esp_map_sg,
	.unmap_single	= sbus_esp_unmap_single,
	.unmap_sg	= sbus_esp_unmap_sg,
	.irq_pending	= sbus_esp_irq_pending,
	.reset_dma	= sbus_esp_reset_dma,
	.dma_drain	= sbus_esp_dma_drain,
	.dma_invalidate	= sbus_esp_dma_invalidate,
	.send_dma_cmd	= sbus_esp_send_dma_cmd,
	.dma_error	= sbus_esp_dma_error,
};

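/* Allocate the Scsi_Host, map the DVMA and ESP registers, set up the
 * command block and interrupt, read the OpenPROM properties, and then
 * register the host with the esp_scsi core.
 */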
static int __devinit esp_sbus_probe_one(struct device *dev,
					struct sbus_dev *esp_dev,
					struct of_device *espdma,
					struct sbus_bus *sbus,
					int hme)
{
	struct scsi_host_template *tpnt = &scsi_esp_template;
	struct Scsi_Host *host;
	struct esp *esp;
	int err;

	host = scsi_host_alloc(tpnt, sizeof(struct esp));

	err = -ENOMEM;
	if (!host)
		goto fail;

	host->max_id = (hme ? 16 : 8);
	esp = shost_priv(host);

	esp->host = host;
	esp->dev = esp_dev;
	esp->ops = &sbus_esp_ops;

	if (hme)
		esp->flags |= ESP_FLAG_WIDE_CAPABLE;

	err = esp_sbus_setup_dma(esp, espdma);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_regs(esp, hme);
	if (err < 0)
		goto fail_unlink;

	err = esp_sbus_map_command_block(esp);
	if (err < 0)
		goto fail_unmap_regs;

	err = esp_sbus_register_irq(esp);
	if (err < 0)
		goto fail_unmap_command_block;

	esp_sbus_get_props(esp, espdma);

	/* Before we try to touch the ESP chip, ESC1 dma can
	 * come up with the reset bit set, so make sure that
	 * is clear first.
	 */
	if (esp->dmarev == dvmaesc1) {
		u32 val = dma_read32(DMA_CSR);

		dma_write32(val & ~DMA_RST_SCSI, DMA_CSR);
	}

	dev_set_drvdata(&esp_dev->ofdev.dev, esp);

	err = scsi_esp_register(esp, dev);
	if (err)
		goto fail_free_irq;

	return 0;

fail_free_irq:
	free_irq(host->irq, esp);
fail_unmap_command_block:
	dma_free_coherent(&esp_dev->ofdev.dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
fail_unmap_regs:
	sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
fail_unlink:
	scsi_host_put(host);
fail:
	return err;
}

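/* Locate the companion DVMA for this ESP: classic ESP chips sit under
 * an "espdma" or "dma" parent node, while HME ("SUNW,fas") carries the
 * DVMA registers in its own node.
 */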
static int __devinit esp_sbus_probe(struct of_device *dev, const struct of_device_id *match)
{
	struct sbus_dev *sdev = to_sbus_device(&dev->dev);
	struct device_node *dma_node = NULL;
	struct device_node *dp = dev->node;
	struct of_device *dma_of = NULL;
	int hme = 0;

	if (dp->parent &&
	    (!strcmp(dp->parent->name, "espdma") ||
	     !strcmp(dp->parent->name, "dma")))
		dma_node = dp->parent;
	else if (!strcmp(dp->name, "SUNW,fas")) {
		dma_node = sdev->ofdev.node;
		hme = 1;
	}
	if (dma_node)
		dma_of = of_find_device_by_node(dma_node);
	if (!dma_of)
		return -ENODEV;

	return esp_sbus_probe_one(&dev->dev, sdev, dma_of,
				  sdev->bus, hme);
}

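/* Tear down in the reverse order of the probe: unregister from the
 * core, mask DVMA interrupts, release the IRQ and command block, and
 * unmap both register ranges.
 */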
static int __devexit esp_sbus_remove(struct of_device *dev)
{
	struct esp *esp = dev_get_drvdata(&dev->dev);
	struct sbus_dev *sdev = esp->dev;
	struct of_device *dma_of = esp->dma;
	unsigned int irq = esp->host->irq;
	u32 val;

	scsi_esp_unregister(esp);

	/* Disable interrupts. */
	val = dma_read32(DMA_CSR);
	dma_write32(val & ~DMA_INT_ENAB, DMA_CSR);

	free_irq(irq, esp);
	dma_free_coherent(&sdev->ofdev.dev, 16,
			  esp->command_block,
			  esp->command_block_dma);
	sbus_iounmap(esp->regs, SBUS_ESP_REG_SIZE);
	of_iounmap(&dma_of->resource[0], esp->dma_regs,
		   resource_size(&dma_of->resource[0]));

	scsi_host_put(esp->host);

	return 0;
}

static struct of_device_id esp_match[] = {
	{
		.name = "SUNW,esp",
	},
	{
		.name = "SUNW,fas",
	},
	{
		.name = "esp",
	},
	{},
};
MODULE_DEVICE_TABLE(of, esp_match);

static struct of_platform_driver esp_sbus_driver = {
	.name		= "esp",
	.match_table	= esp_match,
	.probe		= esp_sbus_probe,
	.remove		= __devexit_p(esp_sbus_remove),
};

static int __init sunesp_init(void)
{
	return of_register_driver(&esp_sbus_driver, &sbus_bus_type);
}

static void __exit sunesp_exit(void)
{
	of_unregister_driver(&esp_sbus_driver);
}

MODULE_DESCRIPTION("Sun ESP SCSI driver");
MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_VERSION);

module_init(sunesp_init);
module_exit(sunesp_exit);