]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/staging/vme/bridges/vme_tsi148.c
Staging: vme: add Tundra TSI148 VME-PCI Bridge driver
[net-next-2.6.git] / drivers / staging / vme / bridges / vme_tsi148.c
CommitLineData
d22b8ed9
MW
1/*
2 * Support for the Tundra TSI148 VME-PCI Bridge Chip
3 *
4 * Author: Martyn Welch <martyn.welch@gefanuc.com>
5 * Copyright 2008 GE Fanuc Intelligent Platforms Embedded Systems, Inc.
6 *
7 * Based on work by Tom Armistead and Ajit Prem
8 * Copyright 2004 Motorola Inc.
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License as published by the
12 * Free Software Foundation; either version 2 of the License, or (at your
13 * option) any later version.
14 */
15
16#include <linux/version.h>
17#include <linux/module.h>
18#include <linux/moduleparam.h>
19#include <linux/mm.h>
20#include <linux/types.h>
21#include <linux/errno.h>
22#include <linux/proc_fs.h>
23#include <linux/pci.h>
24#include <linux/poll.h>
25#include <linux/dma-mapping.h>
26#include <linux/interrupt.h>
27#include <linux/spinlock.h>
28#include <asm/time.h>
29#include <asm/io.h>
30#include <asm/uaccess.h>
31
32#include "../vme.h"
33#include "../vme_bridge.h"
34#include "vme_tsi148.h"
35
36static int __init tsi148_init(void);
37static int tsi148_probe(struct pci_dev *, const struct pci_device_id *);
38static void tsi148_remove(struct pci_dev *);
39static void __exit tsi148_exit(void);
40
41
42int tsi148_slave_set(struct vme_slave_resource *, int, unsigned long long,
43 unsigned long long, dma_addr_t, vme_address_t, vme_cycle_t);
44int tsi148_slave_get(struct vme_slave_resource *, int *, unsigned long long *,
45 unsigned long long *, dma_addr_t *, vme_address_t *, vme_cycle_t *);
46
47int tsi148_master_get(struct vme_master_resource *, int *, unsigned long long *,
48 unsigned long long *, vme_address_t *, vme_cycle_t *, vme_width_t *);
49int tsi148_master_set(struct vme_master_resource *, int, unsigned long long,
50 unsigned long long, vme_address_t, vme_cycle_t, vme_width_t);
51ssize_t tsi148_master_read(struct vme_master_resource *, void *, size_t,
52 loff_t);
53ssize_t tsi148_master_write(struct vme_master_resource *, void *, size_t,
54 loff_t);
55unsigned int tsi148_master_rmw(struct vme_master_resource *, unsigned int,
56 unsigned int, unsigned int, loff_t);
57int tsi148_dma_list_add (struct vme_dma_list *, struct vme_dma_attr *,
58 struct vme_dma_attr *, size_t);
59int tsi148_dma_list_exec(struct vme_dma_list *);
60int tsi148_dma_list_empty(struct vme_dma_list *);
61int tsi148_generate_irq(int, int);
62int tsi148_lm_set(unsigned long long, vme_address_t, vme_cycle_t);
63int tsi148_lm_get(unsigned long long *, vme_address_t *, vme_cycle_t *);
64int tsi148_lm_attach(int, void (*callback)(int));
65int tsi148_lm_detach(int);
66int tsi148_slot_get(void);
67
/* Module parameter */
69int err_chk = 0;
70
71/* XXX These should all be in a per device structure */
72struct vme_bridge *tsi148_bridge;
73wait_queue_head_t dma_queue[2];
74wait_queue_head_t iack_queue;
75void (*lm_callback[4])(int); /* Called in interrupt handler, be careful! */
76void *crcsr_kernel;
77dma_addr_t crcsr_bus;
78struct vme_master_resource *flush_image;
79struct semaphore vme_rmw; /* Only one RMW cycle at a time */
80struct semaphore vme_int; /*
81 * Only one VME interrupt can be
82 * generated at a time, provide locking
83 */
84struct semaphore vme_irq; /* Locking for VME irq callback configuration */
85struct semaphore vme_lm; /* Locking for location monitor operations */
86
87
88static char driver_name[] = "vme_tsi148";
89
90static struct pci_device_id tsi148_ids[] = {
91 { PCI_DEVICE(PCI_VENDOR_ID_TUNDRA, PCI_DEVICE_ID_TUNDRA_TSI148) },
92 { },
93};
94
95static struct pci_driver tsi148_driver = {
96 .name = driver_name,
97 .id_table = tsi148_ids,
98 .probe = tsi148_probe,
99 .remove = tsi148_remove,
100};
101
/*
 * Combine the upper and lower 32-bit halves of a split register pair
 * into a single 64-bit value.
 */
static void reg_join(unsigned int high, unsigned int low,
	unsigned long long *variable)
{
	*variable = ((unsigned long long)high << 32) |
		(unsigned long long)low;
}
108
/*
 * Split a 64-bit value into the upper and lower 32-bit halves used by
 * the chip's paired registers.
 */
static void reg_split(unsigned long long variable, unsigned int *high,
	unsigned int *low)
{
	*high = (unsigned int)(variable >> 32);
	*low = (unsigned int)(variable & 0xFFFFFFFFULL);
}
115
116/*
117 * Wakes up DMA queue.
118 */
119static u32 tsi148_DMA_irqhandler(int channel_mask)
120{
121 u32 serviced = 0;
122
123 if (channel_mask & TSI148_LCSR_INTS_DMA0S) {
124 wake_up(&dma_queue[0]);
125 serviced |= TSI148_LCSR_INTC_DMA0C;
126 }
127 if (channel_mask & TSI148_LCSR_INTS_DMA1S) {
128 wake_up(&dma_queue[1]);
129 serviced |= TSI148_LCSR_INTC_DMA1C;
130 }
131
132 return serviced;
133}
134
135/*
136 * Wake up location monitor queue
137 */
138static u32 tsi148_LM_irqhandler(u32 stat)
139{
140 int i;
141 u32 serviced = 0;
142
143 for (i = 0; i < 4; i++) {
144 if(stat & TSI148_LCSR_INTS_LMS[i]) {
145 /* We only enable interrupts if the callback is set */
146 lm_callback[i](i);
147 serviced |= TSI148_LCSR_INTC_LMC[i];
148 }
149 }
150
151 return serviced;
152}
153
154/*
155 * Wake up mail box queue.
156 *
157 * XXX This functionality is not exposed up though API.
158 */
159static u32 tsi148_MB_irqhandler(u32 stat)
160{
161 int i;
162 u32 val;
163 u32 serviced = 0;
164
165 for (i = 0; i < 4; i++) {
166 if(stat & TSI148_LCSR_INTS_MBS[i]) {
167 val = ioread32be(tsi148_bridge->base +
168 TSI148_GCSR_MBOX[i]);
169 printk("VME Mailbox %d received: 0x%x\n", i, val);
170 serviced |= TSI148_LCSR_INTC_MBC[i];
171 }
172 }
173
174 return serviced;
175}
176
/*
 * Display error & status message when PERR (PCI) exception interrupt occurs.
 *
 * Dumps the latched exception address (EDPAU/EDPAL) and attribute (EDPAT)
 * registers plus the PCI-X diagnostic registers, then writes the EDPCL bit
 * back to EDPAT so the chip can latch the next exception.
 */
static u32 tsi148_PERR_irqhandler(void)
{
	printk(KERN_ERR
		"PCI Exception at address: 0x%08x:%08x, attributes: %08x\n",
		ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAU),
		ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAL),
		ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPAT)
		);
	printk(KERN_ERR
		"PCI-X attribute reg: %08x, PCI-X split completion reg: %08x\n",
		ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXA),
		ioread32be(tsi148_bridge->base + TSI148_LCSR_EDPXS)
		);

	/* Clear the latched exception so further errors can be captured */
	iowrite32be(TSI148_LCSR_EDPAT_EDPCL,
		tsi148_bridge->base + TSI148_LCSR_EDPAT);

	return TSI148_LCSR_INTC_PERRC;
}
199
200/*
201 * Save address and status when VME error interrupt occurs.
202 */
203static u32 tsi148_VERR_irqhandler(void)
204{
205 unsigned int error_addr_high, error_addr_low;
206 unsigned long long error_addr;
207 u32 error_attrib;
208 struct vme_bus_error *error;
209
210 error_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAU);
211 error_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAL);
212 error_attrib = ioread32be(tsi148_bridge->base + TSI148_LCSR_VEAT);
213
214 reg_join(error_addr_high, error_addr_low, &error_addr);
215
216 /* Check for exception register overflow (we have lost error data) */
217 if(error_attrib & TSI148_LCSR_VEAT_VEOF) {
218 printk(KERN_ERR "VME Bus Exception Overflow Occurred\n");
219 }
220
221 error = (struct vme_bus_error *)kmalloc(sizeof (struct vme_bus_error),
222 GFP_ATOMIC);
223 if (error) {
224 error->address = error_addr;
225 error->attributes = error_attrib;
226 list_add_tail(&(error->list), &(tsi148_bridge->vme_errors));
227 } else {
228 printk(KERN_ERR
229 "Unable to alloc memory for VMEbus Error reporting\n");
230 printk(KERN_ERR
231 "VME Bus Error at address: 0x%llx, attributes: %08x\n",
232 error_addr, error_attrib);
233 }
234
235 /* Clear Status */
236 iowrite32be(TSI148_LCSR_VEAT_VESCL,
237 tsi148_bridge->base + TSI148_LCSR_VEAT);
238
239 return TSI148_LCSR_INTC_VERRC;
240}
241
242/*
243 * Wake up IACK queue.
244 */
245static u32 tsi148_IACK_irqhandler(void)
246{
247 printk("tsi148_IACK_irqhandler\n");
248 wake_up(&iack_queue);
249
250 return TSI148_LCSR_INTC_IACKC;
251}
252
253/*
254 * Calling VME bus interrupt callback if provided.
255 */
256static u32 tsi148_VIRQ_irqhandler(u32 stat)
257{
258 int vec, i, serviced = 0;
259 void (*call)(int, int, void *);
260 void *priv_data;
261
262 for (i = 7; i > 0; i--) {
263 if (stat & (1 << i)) {
264 /*
265 * Note: Even though the registers are defined
266 * as 32-bits in the spec, we only want to issue
267 * 8-bit IACK cycles on the bus, read from offset
268 * 3.
269 */
270 vec = ioread8(tsi148_bridge->base +
271 TSI148_LCSR_VIACK[i] + 3);
272
273 call = tsi148_bridge->irq[i - 1].callback[vec].func;
274 priv_data =
275 tsi148_bridge->irq[i-1].callback[vec].priv_data;
276
277 if (call != NULL)
278 call(i, vec, priv_data);
279 else
280 printk("Spurilous VME interrupt, level:%x, "
281 "vector:%x\n", i, vec);
282
283 serviced |= (1 << i);
284 }
285 }
286
287 return serviced;
288}
289
/*
 * Top level interrupt handler. Clears appropriate interrupt status bits and
 * then calls appropriate sub handler(s).
 *
 * Reads the interrupt enable (INTEO) and status (INTS) registers, masks
 * status with enable so only unmasked sources are considered, dispatches
 * to the per-source sub-handlers, then acknowledges exactly the serviced
 * bits by writing them to INTC.
 */
static irqreturn_t tsi148_irqhandler(int irq, void *dev_id)
{
	u32 stat, enable, serviced = 0;

	/* Determine which interrupts are unmasked and set */
	enable = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
	stat = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTS);

	/* Only look at unmasked interrupts */
	stat &= enable;

	/* Shared IRQ line: nothing pending for us means not our interrupt */
	if (unlikely(!stat)) {
		return IRQ_NONE;
	}

	/* Call subhandlers as appropriate */
	/* DMA irqs */
	if (stat & (TSI148_LCSR_INTS_DMA1S | TSI148_LCSR_INTS_DMA0S))
		serviced |= tsi148_DMA_irqhandler(stat);

	/* Location monitor irqs */
	if (stat & (TSI148_LCSR_INTS_LM3S | TSI148_LCSR_INTS_LM2S |
			TSI148_LCSR_INTS_LM1S | TSI148_LCSR_INTS_LM0S))
		serviced |= tsi148_LM_irqhandler(stat);

	/* Mail box irqs */
	if (stat & (TSI148_LCSR_INTS_MB3S | TSI148_LCSR_INTS_MB2S |
			TSI148_LCSR_INTS_MB1S | TSI148_LCSR_INTS_MB0S))
		serviced |= tsi148_MB_irqhandler(stat);

	/* PCI bus error */
	if (stat & TSI148_LCSR_INTS_PERRS)
		serviced |= tsi148_PERR_irqhandler();

	/* VME bus error */
	if (stat & TSI148_LCSR_INTS_VERRS)
		serviced |= tsi148_VERR_irqhandler();

	/* IACK irq */
	if (stat & TSI148_LCSR_INTS_IACKS)
		serviced |= tsi148_IACK_irqhandler();

	/* VME bus irqs */
	if (stat & (TSI148_LCSR_INTS_IRQ7S | TSI148_LCSR_INTS_IRQ6S |
			TSI148_LCSR_INTS_IRQ5S | TSI148_LCSR_INTS_IRQ4S |
			TSI148_LCSR_INTS_IRQ3S | TSI148_LCSR_INTS_IRQ2S |
			TSI148_LCSR_INTS_IRQ1S))
		serviced |= tsi148_VIRQ_irqhandler(stat);

	/* Clear serviced interrupts */
	iowrite32be(serviced, tsi148_bridge->base + TSI148_LCSR_INTC);

	return IRQ_HANDLED;
}
348
/*
 * Hook the bridge's (shared) PCI interrupt line and enable the internal
 * interrupt sources that are safe to enable at init time: DMA completion,
 * mailboxes, PCI and VME error reporting, and IACK. Location monitor and
 * VME bus interrupt levels are enabled later, on demand.
 *
 * Returns 0 on success or the error from request_irq().
 */
static int tsi148_irq_init(struct vme_bridge *bridge)
{
	int result;
	unsigned int tmp;
	struct pci_dev *pdev;

	/* Need pdev */
	pdev = container_of(bridge->parent, struct pci_dev, dev);

	/* Initialise list for VME bus errors */
	INIT_LIST_HEAD(&(bridge->vme_errors));

	result = request_irq(pdev->irq,
			 tsi148_irqhandler,
			 IRQF_SHARED,
			 driver_name, pdev);
	if (result) {
		dev_err(&pdev->dev, "Can't get assigned pci irq vector %02X\n",
			pdev->irq);
		return result;
	}

	/* Enable and unmask interrupts */
	tmp = TSI148_LCSR_INTEO_DMA1EO | TSI148_LCSR_INTEO_DMA0EO |
		TSI148_LCSR_INTEO_MB3EO | TSI148_LCSR_INTEO_MB2EO |
		TSI148_LCSR_INTEO_MB1EO | TSI148_LCSR_INTEO_MB0EO |
		TSI148_LCSR_INTEO_PERREO | TSI148_LCSR_INTEO_VERREO |
		TSI148_LCSR_INTEO_IACKEO;

	/* XXX This leaves the following interrupts masked.
	 * TSI148_LCSR_INTEO_VIEEO
	 * TSI148_LCSR_INTEO_SYSFLEO
	 * TSI148_LCSR_INTEO_ACFLEO
	 */

	/* Don't enable Location Monitor interrupts here - they will be
	 * enabled when the location monitors are properly configured and
	 * a callback has been attached.
	 * TSI148_LCSR_INTEO_LM0EO
	 * TSI148_LCSR_INTEO_LM1EO
	 * TSI148_LCSR_INTEO_LM2EO
	 * TSI148_LCSR_INTEO_LM3EO
	 */

	/* Don't enable VME interrupts until we add a handler, else the board
	 * will respond to it and we don't want that unless it knows how to
	 * properly deal with it.
	 * TSI148_LCSR_INTEO_IRQ7EO
	 * TSI148_LCSR_INTEO_IRQ6EO
	 * TSI148_LCSR_INTEO_IRQ5EO
	 * TSI148_LCSR_INTEO_IRQ4EO
	 * TSI148_LCSR_INTEO_IRQ3EO
	 * TSI148_LCSR_INTEO_IRQ2EO
	 * TSI148_LCSR_INTEO_IRQ1EO
	 */

	/* Write the same mask to both the out-enable and enable registers */
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(tmp, bridge->base + TSI148_LCSR_INTEN);

	return 0;
}
410
/*
 * Tear down bridge interrupt handling: mask and clear all interrupt
 * sources on the chip, then release the PCI IRQ line.
 */
static void tsi148_irq_exit(struct pci_dev *pdev)
{
	/* Turn off interrupts */
	iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEO);
	iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEN);

	/* Clear all interrupts */
	iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTC);

	/* Detach interrupt handler */
	free_irq(pdev->irq, pdev);
}
423
424/*
425 * Check to see if an IACk has been received, return true (1) or false (0).
426 */
427int tsi148_iack_received(void)
428{
429 u32 tmp;
430
431 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);
432
433 if (tmp & TSI148_LCSR_VICR_IRQS)
434 return 0;
435 else
436 return 1;
437}
438
439/*
440 * Set up an VME interrupt
441 */
442int tsi148_request_irq(int level, int statid,
443 void (*callback)(int level, int vector, void *priv_data),
444 void *priv_data)
445{
446 u32 tmp;
447
448 /* Get semaphore */
449 down(&(vme_irq));
450
451 if(tsi148_bridge->irq[level - 1].callback[statid].func) {
452 up(&(vme_irq));
453 printk("VME Interrupt already taken\n");
454 return -EBUSY;
455 }
456
457
458 tsi148_bridge->irq[level - 1].count++;
459 tsi148_bridge->irq[level - 1].callback[statid].priv_data = priv_data;
460 tsi148_bridge->irq[level - 1].callback[statid].func = callback;
461
462 /* Enable IRQ level */
463 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
464 tmp |= TSI148_LCSR_INTEO_IRQEO[level - 1];
465 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
466
467 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
468 tmp |= TSI148_LCSR_INTEN_IRQEN[level - 1];
469 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
470
471 /* Release semaphore */
472 up(&(vme_irq));
473
474 return 0;
475}
476
477/*
478 * Free VME interrupt
479 */
480void tsi148_free_irq(int level, int statid)
481{
482 u32 tmp;
483
484 /* Get semaphore */
485 down(&(vme_irq));
486
487 tsi148_bridge->irq[level - 1].callback[statid].func = NULL;
488 tsi148_bridge->irq[level - 1].callback[statid].priv_data = NULL;
489 tsi148_bridge->irq[level - 1].count--;
490
491 /* Disable IRQ level */
492 if (tsi148_bridge->irq[level - 1].count == 0) {
493 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
494 tmp &= ~TSI148_LCSR_INTEO_IRQEO[level - 1];
495 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
496
497 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
498 tmp &= ~TSI148_LCSR_INTEN_IRQEN[level - 1];
499 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
500 }
501
502 /* Release semaphore */
503 up(&(vme_irq));
504}
505
/*
 * Generate a VME bus interrupt at the requested level & vector. Wait for
 * interrupt to be acked.
 *
 * Only one interrupt can be generated at a time - so add a semaphore.
 *
 * Returns 0. Sleeps until tsi148_IACK_irqhandler() wakes iack_queue and
 * tsi148_iack_received() reports the IRQ deasserted.
 */
int tsi148_generate_irq(int level, int statid)
{
	u32 tmp;

	/* Get semaphore - serialises interrupt generation */
	down(&(vme_int));

	/* Read VICR register */
	tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR);

	/* Set Status/ID presented during the subsequent IACK cycle */
	tmp = (tmp & ~TSI148_LCSR_VICR_STID_M) |
		(statid & TSI148_LCSR_VICR_STID_M);
	iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);

	/* Assert VMEbus IRQ */
	tmp = tmp | TSI148_LCSR_VICR_IRQL[level];
	iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VICR);

	/* XXX Consider implementing a timeout? */
	wait_event_interruptible(iack_queue, tsi148_iack_received());

	/* Release semaphore */
	up(&(vme_int));

	return 0;
}
539
540/*
541 * Find the first error in this address range
542 */
543static struct vme_bus_error *tsi148_find_error(vme_address_t aspace,
544 unsigned long long address, size_t count)
545{
546 struct list_head *err_pos;
547 struct vme_bus_error *vme_err, *valid = NULL;
548 unsigned long long bound;
549
550 bound = address + count;
551
552 /*
553 * XXX We are currently not looking at the address space when parsing
554 * for errors. This is because parsing the Address Modifier Codes
555 * is going to be quite resource intensive to do properly. We
556 * should be OK just looking at the addresses and this is certainly
557 * much better than what we had before.
558 */
559 err_pos = NULL;
560 /* Iterate through errors */
561 list_for_each(err_pos, &(tsi148_bridge->vme_errors)) {
562 vme_err = list_entry(err_pos, struct vme_bus_error, list);
563 if((vme_err->address >= address) && (vme_err->address < bound)){
564 valid = vme_err;
565 break;
566 }
567 }
568
569 return valid;
570}
571
572/*
573 * Clear errors in the provided address range.
574 */
575static void tsi148_clear_errors(vme_address_t aspace,
576 unsigned long long address, size_t count)
577{
578 struct list_head *err_pos, *temp;
579 struct vme_bus_error *vme_err;
580 unsigned long long bound;
581
582 bound = address + count;
583
584 /*
585 * XXX We are currently not looking at the address space when parsing
586 * for errors. This is because parsing the Address Modifier Codes
587 * is going to be quite resource intensive to do properly. We
588 * should be OK just looking at the addresses and this is certainly
589 * much better than what we had before.
590 */
591 err_pos = NULL;
592 /* Iterate through errors */
593 list_for_each_safe(err_pos, temp, &(tsi148_bridge->vme_errors)) {
594 vme_err = list_entry(err_pos, struct vme_bus_error, list);
595
596 if((vme_err->address >= address) && (vme_err->address < bound)){
597 list_del(err_pos);
598 kfree(vme_err);
599 }
600 }
601}
602
603/*
604 * Initialize a slave window with the requested attributes.
605 */
606int tsi148_slave_set(struct vme_slave_resource *image, int enabled,
607 unsigned long long vme_base, unsigned long long size,
608 dma_addr_t pci_base, vme_address_t aspace, vme_cycle_t cycle)
609{
610 unsigned int i, addr = 0, granularity = 0;
611 unsigned int temp_ctl = 0;
612 unsigned int vme_base_low, vme_base_high;
613 unsigned int vme_bound_low, vme_bound_high;
614 unsigned int pci_offset_low, pci_offset_high;
615 unsigned long long vme_bound, pci_offset;
616
617#if 0
618 printk("Set slave image %d to:\n", image->number);
619 printk("\tEnabled: %s\n", (enabled == 1)? "yes" : "no");
620 printk("\tVME Base:0x%llx\n", vme_base);
621 printk("\tWindow Size:0x%llx\n", size);
622 printk("\tPCI Base:0x%lx\n", (unsigned long)pci_base);
623 printk("\tAddress Space:0x%x\n", aspace);
624 printk("\tTransfer Cycle Properties:0x%x\n", cycle);
625#endif
626
627 i = image->number;
628
629 switch (aspace) {
630 case VME_A16:
631 granularity = 0x10;
632 addr |= TSI148_LCSR_ITAT_AS_A16;
633 break;
634 case VME_A24:
635 granularity = 0x1000;
636 addr |= TSI148_LCSR_ITAT_AS_A24;
637 break;
638 case VME_A32:
639 granularity = 0x10000;
640 addr |= TSI148_LCSR_ITAT_AS_A32;
641 break;
642 case VME_A64:
643 granularity = 0x10000;
644 addr |= TSI148_LCSR_ITAT_AS_A64;
645 break;
646 case VME_CRCSR:
647 case VME_USER1:
648 case VME_USER2:
649 case VME_USER3:
650 case VME_USER4:
651 default:
652 printk("Invalid address space\n");
653 return -EINVAL;
654 break;
655 }
656
657 /* Convert 64-bit variables to 2x 32-bit variables */
658 reg_split(vme_base, &vme_base_high, &vme_base_low);
659
660 /*
661 * Bound address is a valid address for the window, adjust
662 * accordingly
663 */
664 vme_bound = vme_base + size - granularity;
665 reg_split(vme_bound, &vme_bound_high, &vme_bound_low);
666 pci_offset = (unsigned long long)pci_base - vme_base;
667 reg_split(pci_offset, &pci_offset_high, &pci_offset_low);
668
669 if (vme_base_low & (granularity - 1)) {
670 printk("Invalid VME base alignment\n");
671 return -EINVAL;
672 }
673 if (vme_bound_low & (granularity - 1)) {
674 printk("Invalid VME bound alignment\n");
675 return -EINVAL;
676 }
677 if (pci_offset_low & (granularity - 1)) {
678 printk("Invalid PCI Offset alignment\n");
679 return -EINVAL;
680 }
681
682#if 0
683 printk("\tVME Bound:0x%llx\n", vme_bound);
684 printk("\tPCI Offset:0x%llx\n", pci_offset);
685#endif
686
687 /* Disable while we are mucking around */
688 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
689 TSI148_LCSR_OFFSET_ITAT);
690 temp_ctl &= ~TSI148_LCSR_ITAT_EN;
691 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
692 TSI148_LCSR_OFFSET_ITAT);
693
694 /* Setup mapping */
695 iowrite32be(vme_base_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
696 TSI148_LCSR_OFFSET_ITSAU);
697 iowrite32be(vme_base_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
698 TSI148_LCSR_OFFSET_ITSAL);
699 iowrite32be(vme_bound_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
700 TSI148_LCSR_OFFSET_ITEAU);
701 iowrite32be(vme_bound_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
702 TSI148_LCSR_OFFSET_ITEAL);
703 iowrite32be(pci_offset_high, tsi148_bridge->base + TSI148_LCSR_IT[i] +
704 TSI148_LCSR_OFFSET_ITOFU);
705 iowrite32be(pci_offset_low, tsi148_bridge->base + TSI148_LCSR_IT[i] +
706 TSI148_LCSR_OFFSET_ITOFL);
707
708/* XXX Prefetch stuff currently unsupported */
709#if 0
710
711 for (x = 0; x < 4; x++) {
712 if ((64 << x) >= vmeIn->prefetchSize) {
713 break;
714 }
715 }
716 if (x == 4)
717 x--;
718 temp_ctl |= (x << 16);
719
720 if (vmeIn->prefetchThreshold)
721 if (vmeIn->prefetchThreshold)
722 temp_ctl |= 0x40000;
723#endif
724
725 /* Setup 2eSST speeds */
726 temp_ctl &= ~TSI148_LCSR_ITAT_2eSSTM_M;
727 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
728 case VME_2eSST160:
729 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_160;
730 break;
731 case VME_2eSST267:
732 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_267;
733 break;
734 case VME_2eSST320:
735 temp_ctl |= TSI148_LCSR_ITAT_2eSSTM_320;
736 break;
737 }
738
739 /* Setup cycle types */
740 temp_ctl &= ~(0x1F << 7);
741 if (cycle & VME_BLT)
742 temp_ctl |= TSI148_LCSR_ITAT_BLT;
743 if (cycle & VME_MBLT)
744 temp_ctl |= TSI148_LCSR_ITAT_MBLT;
745 if (cycle & VME_2eVME)
746 temp_ctl |= TSI148_LCSR_ITAT_2eVME;
747 if (cycle & VME_2eSST)
748 temp_ctl |= TSI148_LCSR_ITAT_2eSST;
749 if (cycle & VME_2eSSTB)
750 temp_ctl |= TSI148_LCSR_ITAT_2eSSTB;
751
752 /* Setup address space */
753 temp_ctl &= ~TSI148_LCSR_ITAT_AS_M;
754 temp_ctl |= addr;
755
756 temp_ctl &= ~0xF;
757 if (cycle & VME_SUPER)
758 temp_ctl |= TSI148_LCSR_ITAT_SUPR ;
759 if (cycle & VME_USER)
760 temp_ctl |= TSI148_LCSR_ITAT_NPRIV;
761 if (cycle & VME_PROG)
762 temp_ctl |= TSI148_LCSR_ITAT_PGM;
763 if (cycle & VME_DATA)
764 temp_ctl |= TSI148_LCSR_ITAT_DATA;
765
766 /* Write ctl reg without enable */
767 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
768 TSI148_LCSR_OFFSET_ITAT);
769
770 if (enabled)
771 temp_ctl |= TSI148_LCSR_ITAT_EN;
772
773 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_IT[i] +
774 TSI148_LCSR_OFFSET_ITAT);
775
776 return 0;
777}
778
779/*
780 * Get slave window configuration.
781 *
782 * XXX Prefetch currently unsupported.
783 */
784int tsi148_slave_get(struct vme_slave_resource *image, int *enabled,
785 unsigned long long *vme_base, unsigned long long *size,
786 dma_addr_t *pci_base, vme_address_t *aspace, vme_cycle_t *cycle)
787{
788 unsigned int i, granularity = 0, ctl = 0;
789 unsigned int vme_base_low, vme_base_high;
790 unsigned int vme_bound_low, vme_bound_high;
791 unsigned int pci_offset_low, pci_offset_high;
792 unsigned long long vme_bound, pci_offset;
793
794
795 i = image->number;
796
797 /* Read registers */
798 ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
799 TSI148_LCSR_OFFSET_ITAT);
800
801 vme_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
802 TSI148_LCSR_OFFSET_ITSAU);
803 vme_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
804 TSI148_LCSR_OFFSET_ITSAL);
805 vme_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
806 TSI148_LCSR_OFFSET_ITEAU);
807 vme_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
808 TSI148_LCSR_OFFSET_ITEAL);
809 pci_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
810 TSI148_LCSR_OFFSET_ITOFU);
811 pci_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_IT[i] +
812 TSI148_LCSR_OFFSET_ITOFL);
813
814 /* Convert 64-bit variables to 2x 32-bit variables */
815 reg_join(vme_base_high, vme_base_low, vme_base);
816 reg_join(vme_bound_high, vme_bound_low, &vme_bound);
817 reg_join(pci_offset_high, pci_offset_low, &pci_offset);
818
819 *pci_base = (dma_addr_t)vme_base + pci_offset;
820
821 *enabled = 0;
822 *aspace = 0;
823 *cycle = 0;
824
825 if (ctl & TSI148_LCSR_ITAT_EN)
826 *enabled = 1;
827
828 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A16) {
829 granularity = 0x10;
830 *aspace |= VME_A16;
831 }
832 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A24) {
833 granularity = 0x1000;
834 *aspace |= VME_A24;
835 }
836 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A32) {
837 granularity = 0x10000;
838 *aspace |= VME_A32;
839 }
840 if ((ctl & TSI148_LCSR_ITAT_AS_M) == TSI148_LCSR_ITAT_AS_A64) {
841 granularity = 0x10000;
842 *aspace |= VME_A64;
843 }
844
845 /* Need granularity before we set the size */
846 *size = (unsigned long long)((vme_bound - *vme_base) + granularity);
847
848
849 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_160)
850 *cycle |= VME_2eSST160;
851 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_267)
852 *cycle |= VME_2eSST267;
853 if ((ctl & TSI148_LCSR_ITAT_2eSSTM_M) == TSI148_LCSR_ITAT_2eSSTM_320)
854 *cycle |= VME_2eSST320;
855
856 if (ctl & TSI148_LCSR_ITAT_BLT)
857 *cycle |= VME_BLT;
858 if (ctl & TSI148_LCSR_ITAT_MBLT)
859 *cycle |= VME_MBLT;
860 if (ctl & TSI148_LCSR_ITAT_2eVME)
861 *cycle |= VME_2eVME;
862 if (ctl & TSI148_LCSR_ITAT_2eSST)
863 *cycle |= VME_2eSST;
864 if (ctl & TSI148_LCSR_ITAT_2eSSTB)
865 *cycle |= VME_2eSSTB;
866
867 if (ctl & TSI148_LCSR_ITAT_SUPR)
868 *cycle |= VME_SUPER;
869 if (ctl & TSI148_LCSR_ITAT_NPRIV)
870 *cycle |= VME_USER;
871 if (ctl & TSI148_LCSR_ITAT_PGM)
872 *cycle |= VME_PROG;
873 if (ctl & TSI148_LCSR_ITAT_DATA)
874 *cycle |= VME_DATA;
875
876 return 0;
877}
878
879/*
880 * Allocate and map PCI Resource
881 */
882static int tsi148_alloc_resource(struct vme_master_resource *image,
883 unsigned long long size)
884{
885 unsigned long long existing_size;
886 int retval = 0;
887 struct pci_dev *pdev;
888
889 /* Find pci_dev container of dev */
890 if (tsi148_bridge->parent == NULL) {
891 printk("Dev entry NULL\n");
892 return -EINVAL;
893 }
894 pdev = container_of(tsi148_bridge->parent, struct pci_dev, dev);
895
896 existing_size = (unsigned long long)(image->pci_resource.end -
897 image->pci_resource.start);
898
899 /* If the existing size is OK, return */
900 if (existing_size == (size - 1))
901 return 0;
902
903 if (existing_size != 0) {
904 iounmap(image->kern_base);
905 image->kern_base = NULL;
906 if (image->pci_resource.name != NULL)
907 kfree(image->pci_resource.name);
908 release_resource(&(image->pci_resource));
909 memset(&(image->pci_resource), 0, sizeof(struct resource));
910 }
911
912 if (image->pci_resource.name == NULL) {
913 image->pci_resource.name = kmalloc(VMENAMSIZ+3, GFP_KERNEL);
914 if (image->pci_resource.name == NULL) {
915 printk(KERN_ERR "Unable to allocate memory for resource"
916 " name\n");
917 retval = -ENOMEM;
918 goto err_name;
919 }
920 }
921
922 sprintf((char *)image->pci_resource.name, "%s.%d", tsi148_bridge->name,
923 image->number);
924
925 image->pci_resource.start = 0;
926 image->pci_resource.end = (unsigned long)size;
927 image->pci_resource.flags = IORESOURCE_MEM;
928
929 retval = pci_bus_alloc_resource(pdev->bus,
930 &(image->pci_resource), size, size, PCIBIOS_MIN_MEM,
931 0, NULL, NULL);
932 if (retval) {
933 printk(KERN_ERR "Failed to allocate mem resource for "
934 "window %d size 0x%lx start 0x%lx\n",
935 image->number, (unsigned long)size,
936 (unsigned long)image->pci_resource.start);
937 goto err_resource;
938 }
939
940 image->kern_base = ioremap_nocache(
941 image->pci_resource.start, size);
942 if (image->kern_base == NULL) {
943 printk(KERN_ERR "Failed to remap resource\n");
944 retval = -ENOMEM;
945 goto err_remap;
946 }
947
948 return 0;
949
950 iounmap(image->kern_base);
951 image->kern_base = NULL;
952err_remap:
953 release_resource(&(image->pci_resource));
954err_resource:
955 kfree(image->pci_resource.name);
956 memset(&(image->pci_resource), 0, sizeof(struct resource));
957err_name:
958 return retval;
959}
960
961/*
962 * Free and unmap PCI Resource
963 */
964static void tsi148_free_resource(struct vme_master_resource *image)
965{
966 iounmap(image->kern_base);
967 image->kern_base = NULL;
968 release_resource(&(image->pci_resource));
969 kfree(image->pci_resource.name);
970 memset(&(image->pci_resource), 0, sizeof(struct resource));
971}
972
973/*
974 * Set the attributes of an outbound window.
975 */
976int tsi148_master_set( struct vme_master_resource *image, int enabled,
977 unsigned long long vme_base, unsigned long long size,
978 vme_address_t aspace, vme_cycle_t cycle, vme_width_t dwidth)
979{
980 int retval = 0;
981 unsigned int i;
982 unsigned int temp_ctl = 0;
983 unsigned int pci_base_low, pci_base_high;
984 unsigned int pci_bound_low, pci_bound_high;
985 unsigned int vme_offset_low, vme_offset_high;
986 unsigned long long pci_bound, vme_offset, pci_base;
987
988 /* Verify input data */
989 if (vme_base & 0xFFFF) {
990 printk("Invalid VME Window alignment\n");
991 retval = -EINVAL;
992 goto err_window;
993 }
994 if (size < 0x10000) {
995 printk("Invalid VME Window size\n");
996 retval = -EINVAL;
997 goto err_window;
998 }
999
1000 spin_lock(&(image->lock));
1001
1002 /* Let's allocate the resource here rather than further up the stack as
1003 * it avoids pushing loads of bus dependant stuff up the stack
1004 */
1005 retval = tsi148_alloc_resource(image, size);
1006 if (retval) {
1007 spin_unlock(&(image->lock));
1008 printk(KERN_ERR "Unable to allocate memory for resource "
1009 "name\n");
1010 retval = -ENOMEM;
1011 goto err_res;
1012 }
1013
1014 pci_base = (unsigned long long)image->pci_resource.start;
1015
1016
1017 /*
1018 * Bound address is a valid address for the window, adjust
1019 * according to window granularity.
1020 */
1021 pci_bound = pci_base + (size - 0x10000);
1022 vme_offset = vme_base - pci_base;
1023
1024 /* Convert 64-bit variables to 2x 32-bit variables */
1025 reg_split(pci_base, &pci_base_high, &pci_base_low);
1026 reg_split(pci_bound, &pci_bound_high, &pci_bound_low);
1027 reg_split(vme_offset, &vme_offset_high, &vme_offset_low);
1028
1029 if (pci_base_low & 0xFFFF) {
1030 spin_unlock(&(image->lock));
1031 printk("Invalid PCI base alignment\n");
1032 retval = -EINVAL;
1033 goto err_gran;
1034 }
1035 if (pci_bound_low & 0xFFFF) {
1036 spin_unlock(&(image->lock));
1037 printk("Invalid PCI bound alignment\n");
1038 retval = -EINVAL;
1039 goto err_gran;
1040 }
1041 if (vme_offset_low & 0xFFFF) {
1042 spin_unlock(&(image->lock));
1043 printk("Invalid VME Offset alignment\n");
1044 retval = -EINVAL;
1045 goto err_gran;
1046 }
1047
1048 i = image->number;
1049
1050 /* Disable while we are mucking around */
1051 temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1052 TSI148_LCSR_OFFSET_OTAT);
1053 temp_ctl &= ~TSI148_LCSR_OTAT_EN;
1054 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1055 TSI148_LCSR_OFFSET_OTAT);
1056
1057/* XXX Prefetch stuff currently unsupported */
1058#if 0
1059 if (vmeOut->prefetchEnable) {
1060 temp_ctl |= 0x40000;
1061 for (x = 0; x < 4; x++) {
1062 if ((2 << x) >= vmeOut->prefetchSize)
1063 break;
1064 }
1065 if (x == 4)
1066 x = 3;
1067 temp_ctl |= (x << 16);
1068 }
1069#endif
1070
1071 /* Setup 2eSST speeds */
1072 temp_ctl &= ~TSI148_LCSR_OTAT_2eSSTM_M;
1073 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1074 case VME_2eSST160:
1075 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_160;
1076 break;
1077 case VME_2eSST267:
1078 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_267;
1079 break;
1080 case VME_2eSST320:
1081 temp_ctl |= TSI148_LCSR_OTAT_2eSSTM_320;
1082 break;
1083 }
1084
1085 /* Setup cycle types */
1086 if (cycle & VME_BLT) {
1087 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1088 temp_ctl |= TSI148_LCSR_OTAT_TM_BLT;
1089 }
1090 if (cycle & VME_MBLT) {
1091 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1092 temp_ctl |= TSI148_LCSR_OTAT_TM_MBLT;
1093 }
1094 if (cycle & VME_2eVME) {
1095 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1096 temp_ctl |= TSI148_LCSR_OTAT_TM_2eVME;
1097 }
1098 if (cycle & VME_2eSST) {
1099 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1100 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSST;
1101 }
1102 if (cycle & VME_2eSSTB) {
1103 printk("Currently not setting Broadcast Select Registers\n");
1104 temp_ctl &= ~TSI148_LCSR_OTAT_TM_M;
1105 temp_ctl |= TSI148_LCSR_OTAT_TM_2eSSTB;
1106 }
1107
1108 /* Setup data width */
1109 temp_ctl &= ~TSI148_LCSR_OTAT_DBW_M;
1110 switch (dwidth) {
1111 case VME_D16:
1112 temp_ctl |= TSI148_LCSR_OTAT_DBW_16;
1113 break;
1114 case VME_D32:
1115 temp_ctl |= TSI148_LCSR_OTAT_DBW_32;
1116 break;
1117 default:
1118 spin_unlock(&(image->lock));
1119 printk("Invalid data width\n");
1120 retval = -EINVAL;
1121 goto err_dwidth;
1122 }
1123
1124 /* Setup address space */
1125 temp_ctl &= ~TSI148_LCSR_OTAT_AMODE_M;
1126 switch (aspace) {
1127 case VME_A16:
1128 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A16;
1129 break;
1130 case VME_A24:
1131 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A24;
1132 break;
1133 case VME_A32:
1134 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A32;
1135 break;
1136 case VME_A64:
1137 temp_ctl |= TSI148_LCSR_OTAT_AMODE_A64;
1138 break;
1139 case VME_CRCSR:
1140 temp_ctl |= TSI148_LCSR_OTAT_AMODE_CRCSR;
1141 break;
1142 case VME_USER1:
1143 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER1;
1144 break;
1145 case VME_USER2:
1146 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER2;
1147 break;
1148 case VME_USER3:
1149 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER3;
1150 break;
1151 case VME_USER4:
1152 temp_ctl |= TSI148_LCSR_OTAT_AMODE_USER4;
1153 break;
1154 default:
1155 spin_unlock(&(image->lock));
1156 printk("Invalid address space\n");
1157 retval = -EINVAL;
1158 goto err_aspace;
1159 break;
1160 }
1161
1162 temp_ctl &= ~(3<<4);
1163 if (cycle & VME_SUPER)
1164 temp_ctl |= TSI148_LCSR_OTAT_SUP;
1165 if (cycle & VME_PROG)
1166 temp_ctl |= TSI148_LCSR_OTAT_PGM;
1167
1168 /* Setup mapping */
1169 iowrite32be(pci_base_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1170 TSI148_LCSR_OFFSET_OTSAU);
1171 iowrite32be(pci_base_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1172 TSI148_LCSR_OFFSET_OTSAL);
1173 iowrite32be(pci_bound_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1174 TSI148_LCSR_OFFSET_OTEAU);
1175 iowrite32be(pci_bound_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1176 TSI148_LCSR_OFFSET_OTEAL);
1177 iowrite32be(vme_offset_high, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1178 TSI148_LCSR_OFFSET_OTOFU);
1179 iowrite32be(vme_offset_low, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1180 TSI148_LCSR_OFFSET_OTOFL);
1181
1182/* XXX We need to deal with OTBS */
1183#if 0
1184 iowrite32be(vmeOut->bcastSelect2esst, tsi148_bridge->base +
1185 TSI148_LCSR_OT[i] + TSI148_LCSR_OFFSET_OTBS);
1186#endif
1187
1188 /* Write ctl reg without enable */
1189 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1190 TSI148_LCSR_OFFSET_OTAT);
1191
1192 if (enabled)
1193 temp_ctl |= TSI148_LCSR_OTAT_EN;
1194
1195 iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_OT[i] +
1196 TSI148_LCSR_OFFSET_OTAT);
1197
1198 spin_unlock(&(image->lock));
1199 return 0;
1200
1201err_aspace:
1202err_dwidth:
1203err_gran:
1204 tsi148_free_resource(image);
1205err_res:
1206err_window:
1207 return retval;
1208
1209}
1210
1211/*
1212 * Set the attributes of an outbound window.
1213 *
1214 * XXX Not parsing prefetch information.
1215 */
1216int __tsi148_master_get( struct vme_master_resource *image, int *enabled,
1217 unsigned long long *vme_base, unsigned long long *size,
1218 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1219{
1220 unsigned int i, ctl;
1221 unsigned int pci_base_low, pci_base_high;
1222 unsigned int pci_bound_low, pci_bound_high;
1223 unsigned int vme_offset_low, vme_offset_high;
1224
1225 unsigned long long pci_base, pci_bound, vme_offset;
1226
1227 i = image->number;
1228
1229 ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1230 TSI148_LCSR_OFFSET_OTAT);
1231
1232 pci_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1233 TSI148_LCSR_OFFSET_OTSAU);
1234 pci_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1235 TSI148_LCSR_OFFSET_OTSAL);
1236 pci_bound_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1237 TSI148_LCSR_OFFSET_OTEAU);
1238 pci_bound_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1239 TSI148_LCSR_OFFSET_OTEAL);
1240 vme_offset_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1241 TSI148_LCSR_OFFSET_OTOFU);
1242 vme_offset_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1243 TSI148_LCSR_OFFSET_OTOFL);
1244
1245 /* Convert 64-bit variables to 2x 32-bit variables */
1246 reg_join(pci_base_high, pci_base_low, &pci_base);
1247 reg_join(pci_bound_high, pci_bound_low, &pci_bound);
1248 reg_join(vme_offset_high, vme_offset_low, &vme_offset);
1249
1250 *vme_base = pci_base + vme_offset;
1251 *size = (unsigned long long)(pci_bound - pci_base) + 0x10000;
1252
1253 *enabled = 0;
1254 *aspace = 0;
1255 *cycle = 0;
1256 *dwidth = 0;
1257
1258 if (ctl & TSI148_LCSR_OTAT_EN)
1259 *enabled = 1;
1260
1261 /* Setup address space */
1262 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A16)
1263 *aspace |= VME_A16;
1264 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A24)
1265 *aspace |= VME_A24;
1266 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A32)
1267 *aspace |= VME_A32;
1268 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_A64)
1269 *aspace |= VME_A64;
1270 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_CRCSR)
1271 *aspace |= VME_CRCSR;
1272 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER1)
1273 *aspace |= VME_USER1;
1274 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER2)
1275 *aspace |= VME_USER2;
1276 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER3)
1277 *aspace |= VME_USER3;
1278 if ((ctl & TSI148_LCSR_OTAT_AMODE_M) == TSI148_LCSR_OTAT_AMODE_USER4)
1279 *aspace |= VME_USER4;
1280
1281 /* Setup 2eSST speeds */
1282 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_160)
1283 *cycle |= VME_2eSST160;
1284 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_267)
1285 *cycle |= VME_2eSST267;
1286 if ((ctl & TSI148_LCSR_OTAT_2eSSTM_M) == TSI148_LCSR_OTAT_2eSSTM_320)
1287 *cycle |= VME_2eSST320;
1288
1289 /* Setup cycle types */
1290 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_SCT)
1291 *cycle |= VME_SCT;
1292 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_BLT)
1293 *cycle |= VME_BLT;
1294 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_MBLT)
1295 *cycle |= VME_MBLT;
1296 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eVME)
1297 *cycle |= VME_2eVME;
1298 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSST)
1299 *cycle |= VME_2eSST;
1300 if ((ctl & TSI148_LCSR_OTAT_TM_M ) == TSI148_LCSR_OTAT_TM_2eSSTB)
1301 *cycle |= VME_2eSSTB;
1302
1303 if (ctl & TSI148_LCSR_OTAT_SUP)
1304 *cycle |= VME_SUPER;
1305 else
1306 *cycle |= VME_USER;
1307
1308 if (ctl & TSI148_LCSR_OTAT_PGM)
1309 *cycle |= VME_PROG;
1310 else
1311 *cycle |= VME_DATA;
1312
1313 /* Setup data width */
1314 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_16)
1315 *dwidth = VME_D16;
1316 if ((ctl & TSI148_LCSR_OTAT_DBW_M) == TSI148_LCSR_OTAT_DBW_32)
1317 *dwidth = VME_D32;
1318
1319 return 0;
1320}
1321
1322
1323int tsi148_master_get( struct vme_master_resource *image, int *enabled,
1324 unsigned long long *vme_base, unsigned long long *size,
1325 vme_address_t *aspace, vme_cycle_t *cycle, vme_width_t *dwidth)
1326{
1327 int retval;
1328
1329 spin_lock(&(image->lock));
1330
1331 retval = __tsi148_master_get(image, enabled, vme_base, size, aspace,
1332 cycle, dwidth);
1333
1334 spin_unlock(&(image->lock));
1335
1336 return retval;
1337}
1338
1339ssize_t tsi148_master_read(struct vme_master_resource *image, void *buf,
1340 size_t count, loff_t offset)
1341{
1342 int retval, enabled;
1343 unsigned long long vme_base, size;
1344 vme_address_t aspace;
1345 vme_cycle_t cycle;
1346 vme_width_t dwidth;
1347 struct vme_bus_error *vme_err = NULL;
1348
1349 spin_lock(&(image->lock));
1350
1351 memcpy_fromio(buf, image->kern_base + offset, (unsigned int)count);
1352 retval = count;
1353
1354 if (!err_chk)
1355 goto skip_chk;
1356
1357 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1358 &dwidth);
1359
1360 vme_err = tsi148_find_error(aspace, vme_base + offset, count);
1361 if(vme_err != NULL) {
1362 dev_err(image->parent->parent, "First VME read error detected "
1363 "an at address 0x%llx\n", vme_err->address);
1364 retval = vme_err->address - (vme_base + offset);
1365 /* Clear down save errors in this address range */
1366 tsi148_clear_errors(aspace, vme_base + offset, count);
1367 }
1368
1369skip_chk:
1370 spin_unlock(&(image->lock));
1371
1372 return retval;
1373}
1374
1375
1376/* XXX We need to change vme_master_resource->sem to a spinlock so that read
1377 * and write functions can be used in an interrupt context
1378 */
1379ssize_t tsi148_master_write(struct vme_master_resource *image, void *buf,
1380 size_t count, loff_t offset)
1381{
1382 int retval = 0, enabled;
1383 unsigned long long vme_base, size;
1384 vme_address_t aspace;
1385 vme_cycle_t cycle;
1386 vme_width_t dwidth;
1387
1388 struct vme_bus_error *vme_err = NULL;
1389
1390 spin_lock(&(image->lock));
1391
1392 memcpy_toio(image->kern_base + offset, buf, (unsigned int)count);
1393 retval = count;
1394
1395 /*
1396 * Writes are posted. We need to do a read on the VME bus to flush out
1397 * all of the writes before we check for errors. We can't guarentee
1398 * that reading the data we have just written is safe. It is believed
1399 * that there isn't any read, write re-ordering, so we can read any
1400 * location in VME space, so lets read the Device ID from the tsi148's
1401 * own registers as mapped into CR/CSR space.
1402 *
1403 * We check for saved errors in the written address range/space.
1404 */
1405
1406 if (!err_chk)
1407 goto skip_chk;
1408
1409 /*
1410 * Get window info first, to maximise the time that the buffers may
1411 * fluch on their own
1412 */
1413 __tsi148_master_get(image, &enabled, &vme_base, &size, &aspace, &cycle,
1414 &dwidth);
1415
1416 ioread16(flush_image->kern_base + 0x7F000);
1417
1418 vme_err = tsi148_find_error(aspace, vme_base + offset, count);
1419 if(vme_err != NULL) {
1420 printk("First VME write error detected an at address 0x%llx\n",
1421 vme_err->address);
1422 retval = vme_err->address - (vme_base + offset);
1423 /* Clear down save errors in this address range */
1424 tsi148_clear_errors(aspace, vme_base + offset, count);
1425 }
1426
1427skip_chk:
1428 spin_unlock(&(image->lock));
1429
1430 return retval;
1431}
1432
1433/*
1434 * Perform an RMW cycle on the VME bus.
1435 *
1436 * Requires a previously configured master window, returns final value.
1437 */
1438unsigned int tsi148_master_rmw(struct vme_master_resource *image,
1439 unsigned int mask, unsigned int compare, unsigned int swap,
1440 loff_t offset)
1441{
1442 unsigned long long pci_addr;
1443 unsigned int pci_addr_high, pci_addr_low;
1444 u32 tmp, result;
1445 int i;
1446
1447
1448 /* Find the PCI address that maps to the desired VME address */
1449 i = image->number;
1450
1451 /* Locking as we can only do one of these at a time */
1452 down(&(vme_rmw));
1453
1454 /* Lock image */
1455 spin_lock(&(image->lock));
1456
1457 pci_addr_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1458 TSI148_LCSR_OFFSET_OTSAU);
1459 pci_addr_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_OT[i] +
1460 TSI148_LCSR_OFFSET_OTSAL);
1461
1462 reg_join(pci_addr_high, pci_addr_low, &pci_addr);
1463 reg_split(pci_addr + offset, &pci_addr_high, &pci_addr_low);
1464
1465 /* Configure registers */
1466 iowrite32be(mask, tsi148_bridge->base + TSI148_LCSR_RMWEN);
1467 iowrite32be(compare, tsi148_bridge->base + TSI148_LCSR_RMWC);
1468 iowrite32be(swap, tsi148_bridge->base + TSI148_LCSR_RMWS);
1469 iowrite32be(pci_addr_high, tsi148_bridge->base + TSI148_LCSR_RMWAU);
1470 iowrite32be(pci_addr_low, tsi148_bridge->base + TSI148_LCSR_RMWAL);
1471
1472 /* Enable RMW */
1473 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1474 tmp |= TSI148_LCSR_VMCTRL_RMWEN;
1475 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1476
1477 /* Kick process off with a read to the required address. */
1478 result = ioread32be(image->kern_base + offset);
1479
1480 /* Disable RMW */
1481 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1482 tmp &= ~TSI148_LCSR_VMCTRL_RMWEN;
1483 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
1484
1485 spin_unlock(&(image->lock));
1486
1487 up(&(vme_rmw));
1488
1489 return result;
1490}
1491
1492static int tsi148_dma_set_vme_src_attributes (u32 *attr, vme_address_t aspace,
1493 vme_cycle_t cycle, vme_width_t dwidth)
1494{
1495 /* Setup 2eSST speeds */
1496 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1497 case VME_2eSST160:
1498 *attr |= TSI148_LCSR_DSAT_2eSSTM_160;
1499 break;
1500 case VME_2eSST267:
1501 *attr |= TSI148_LCSR_DSAT_2eSSTM_267;
1502 break;
1503 case VME_2eSST320:
1504 *attr |= TSI148_LCSR_DSAT_2eSSTM_320;
1505 break;
1506 }
1507
1508 /* Setup cycle types */
1509 if (cycle & VME_SCT) {
1510 *attr |= TSI148_LCSR_DSAT_TM_SCT;
1511 }
1512 if (cycle & VME_BLT) {
1513 *attr |= TSI148_LCSR_DSAT_TM_BLT;
1514 }
1515 if (cycle & VME_MBLT) {
1516 *attr |= TSI148_LCSR_DSAT_TM_MBLT;
1517 }
1518 if (cycle & VME_2eVME) {
1519 *attr |= TSI148_LCSR_DSAT_TM_2eVME;
1520 }
1521 if (cycle & VME_2eSST) {
1522 *attr |= TSI148_LCSR_DSAT_TM_2eSST;
1523 }
1524 if (cycle & VME_2eSSTB) {
1525 printk("Currently not setting Broadcast Select Registers\n");
1526 *attr |= TSI148_LCSR_DSAT_TM_2eSSTB;
1527 }
1528
1529 /* Setup data width */
1530 switch (dwidth) {
1531 case VME_D16:
1532 *attr |= TSI148_LCSR_DSAT_DBW_16;
1533 break;
1534 case VME_D32:
1535 *attr |= TSI148_LCSR_DSAT_DBW_32;
1536 break;
1537 default:
1538 printk("Invalid data width\n");
1539 return -EINVAL;
1540 }
1541
1542 /* Setup address space */
1543 switch (aspace) {
1544 case VME_A16:
1545 *attr |= TSI148_LCSR_DSAT_AMODE_A16;
1546 break;
1547 case VME_A24:
1548 *attr |= TSI148_LCSR_DSAT_AMODE_A24;
1549 break;
1550 case VME_A32:
1551 *attr |= TSI148_LCSR_DSAT_AMODE_A32;
1552 break;
1553 case VME_A64:
1554 *attr |= TSI148_LCSR_DSAT_AMODE_A64;
1555 break;
1556 case VME_CRCSR:
1557 *attr |= TSI148_LCSR_DSAT_AMODE_CRCSR;
1558 break;
1559 case VME_USER1:
1560 *attr |= TSI148_LCSR_DSAT_AMODE_USER1;
1561 break;
1562 case VME_USER2:
1563 *attr |= TSI148_LCSR_DSAT_AMODE_USER2;
1564 break;
1565 case VME_USER3:
1566 *attr |= TSI148_LCSR_DSAT_AMODE_USER3;
1567 break;
1568 case VME_USER4:
1569 *attr |= TSI148_LCSR_DSAT_AMODE_USER4;
1570 break;
1571 default:
1572 printk("Invalid address space\n");
1573 return -EINVAL;
1574 break;
1575 }
1576
1577 if (cycle & VME_SUPER)
1578 *attr |= TSI148_LCSR_DSAT_SUP;
1579 if (cycle & VME_PROG)
1580 *attr |= TSI148_LCSR_DSAT_PGM;
1581
1582 return 0;
1583}
1584
1585static int tsi148_dma_set_vme_dest_attributes(u32 *attr, vme_address_t aspace,
1586 vme_cycle_t cycle, vme_width_t dwidth)
1587{
1588 /* Setup 2eSST speeds */
1589 switch (cycle & (VME_2eSST160 | VME_2eSST267 | VME_2eSST320)) {
1590 case VME_2eSST160:
1591 *attr |= TSI148_LCSR_DDAT_2eSSTM_160;
1592 break;
1593 case VME_2eSST267:
1594 *attr |= TSI148_LCSR_DDAT_2eSSTM_267;
1595 break;
1596 case VME_2eSST320:
1597 *attr |= TSI148_LCSR_DDAT_2eSSTM_320;
1598 break;
1599 }
1600
1601 /* Setup cycle types */
1602 if (cycle & VME_SCT) {
1603 *attr |= TSI148_LCSR_DDAT_TM_SCT;
1604 }
1605 if (cycle & VME_BLT) {
1606 *attr |= TSI148_LCSR_DDAT_TM_BLT;
1607 }
1608 if (cycle & VME_MBLT) {
1609 *attr |= TSI148_LCSR_DDAT_TM_MBLT;
1610 }
1611 if (cycle & VME_2eVME) {
1612 *attr |= TSI148_LCSR_DDAT_TM_2eVME;
1613 }
1614 if (cycle & VME_2eSST) {
1615 *attr |= TSI148_LCSR_DDAT_TM_2eSST;
1616 }
1617 if (cycle & VME_2eSSTB) {
1618 printk("Currently not setting Broadcast Select Registers\n");
1619 *attr |= TSI148_LCSR_DDAT_TM_2eSSTB;
1620 }
1621
1622 /* Setup data width */
1623 switch (dwidth) {
1624 case VME_D16:
1625 *attr |= TSI148_LCSR_DDAT_DBW_16;
1626 break;
1627 case VME_D32:
1628 *attr |= TSI148_LCSR_DDAT_DBW_32;
1629 break;
1630 default:
1631 printk("Invalid data width\n");
1632 return -EINVAL;
1633 }
1634
1635 /* Setup address space */
1636 switch (aspace) {
1637 case VME_A16:
1638 *attr |= TSI148_LCSR_DDAT_AMODE_A16;
1639 break;
1640 case VME_A24:
1641 *attr |= TSI148_LCSR_DDAT_AMODE_A24;
1642 break;
1643 case VME_A32:
1644 *attr |= TSI148_LCSR_DDAT_AMODE_A32;
1645 break;
1646 case VME_A64:
1647 *attr |= TSI148_LCSR_DDAT_AMODE_A64;
1648 break;
1649 case VME_CRCSR:
1650 *attr |= TSI148_LCSR_DDAT_AMODE_CRCSR;
1651 break;
1652 case VME_USER1:
1653 *attr |= TSI148_LCSR_DDAT_AMODE_USER1;
1654 break;
1655 case VME_USER2:
1656 *attr |= TSI148_LCSR_DDAT_AMODE_USER2;
1657 break;
1658 case VME_USER3:
1659 *attr |= TSI148_LCSR_DDAT_AMODE_USER3;
1660 break;
1661 case VME_USER4:
1662 *attr |= TSI148_LCSR_DDAT_AMODE_USER4;
1663 break;
1664 default:
1665 printk("Invalid address space\n");
1666 return -EINVAL;
1667 break;
1668 }
1669
1670 if (cycle & VME_SUPER)
1671 *attr |= TSI148_LCSR_DDAT_SUP;
1672 if (cycle & VME_PROG)
1673 *attr |= TSI148_LCSR_DDAT_PGM;
1674
1675 return 0;
1676}
1677
1678/*
1679 * Add a link list descriptor to the list
1680 *
1681 * XXX Need to handle 2eSST Broadcast select bits
1682 */
1683int tsi148_dma_list_add (struct vme_dma_list *list, struct vme_dma_attr *src,
1684 struct vme_dma_attr *dest, size_t count)
1685{
1686 struct tsi148_dma_entry *entry, *prev;
1687 u32 address_high, address_low;
1688 struct vme_dma_pattern *pattern_attr;
1689 struct vme_dma_pci *pci_attr;
1690 struct vme_dma_vme *vme_attr;
1691 dma_addr_t desc_ptr;
1692 int retval = 0;
1693
1694 /* XXX descriptor must be aligned on 64-bit boundaries */
1695 entry = (struct tsi148_dma_entry *)kmalloc(
1696 sizeof(struct tsi148_dma_entry), GFP_KERNEL);
1697 if (entry == NULL) {
1698 printk("Failed to allocate memory for dma resource "
1699 "structure\n");
1700 retval = -ENOMEM;
1701 goto err_mem;
1702 }
1703
1704 /* Test descriptor alignment */
1705 if ((unsigned long)&(entry->descriptor) & 0x7) {
1706 printk("Descriptor not aligned to 8 byte boundary as "
1707 "required: %p\n", &(entry->descriptor));
1708 retval = -EINVAL;
1709 goto err_align;
1710 }
1711
1712 /* Given we are going to fill out the structure, we probably don't
1713 * need to zero it, but better safe than sorry for now.
1714 */
1715 memset(&(entry->descriptor), 0, sizeof(struct tsi148_dma_descriptor));
1716
1717 /* Fill out source part */
1718 switch (src->type) {
1719 case VME_DMA_PATTERN:
1720 pattern_attr = (struct vme_dma_pattern *)src->private;
1721
1722 entry->descriptor.dsal = pattern_attr->pattern;
1723 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PAT;
1724 /* Default behaviour is 32 bit pattern */
1725 if (pattern_attr->type & VME_DMA_PATTERN_BYTE) {
1726 entry->descriptor.dsat |= TSI148_LCSR_DSAT_PSZ;
1727 }
1728 /* It seems that the default behaviour is to increment */
1729 if ((pattern_attr->type & VME_DMA_PATTERN_INCREMENT) == 0) {
1730 entry->descriptor.dsat |= TSI148_LCSR_DSAT_NIN;
1731 }
1732 break;
1733 case VME_DMA_PCI:
1734 pci_attr = (struct vme_dma_pci *)src->private;
1735
1736 reg_split((unsigned long long)pci_attr->address, &address_high,
1737 &address_low);
1738 entry->descriptor.dsau = address_high;
1739 entry->descriptor.dsal = address_low;
1740 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_PCI;
1741 break;
1742 case VME_DMA_VME:
1743 vme_attr = (struct vme_dma_vme *)src->private;
1744
1745 reg_split((unsigned long long)vme_attr->address, &address_high,
1746 &address_low);
1747 entry->descriptor.dsau = address_high;
1748 entry->descriptor.dsal = address_low;
1749 entry->descriptor.dsat = TSI148_LCSR_DSAT_TYP_VME;
1750
1751 retval = tsi148_dma_set_vme_src_attributes(
1752 &(entry->descriptor.dsat), vme_attr->aspace,
1753 vme_attr->cycle, vme_attr->dwidth);
1754 if(retval < 0 )
1755 goto err_source;
1756 break;
1757 default:
1758 printk("Invalid source type\n");
1759 retval = -EINVAL;
1760 goto err_source;
1761 break;
1762 }
1763
1764 /* Assume last link - this will be over-written by adding another */
1765 entry->descriptor.dnlau = 0;
1766 entry->descriptor.dnlal = TSI148_LCSR_DNLAL_LLA;
1767
1768
1769 /* Fill out destination part */
1770 switch (dest->type) {
1771 case VME_DMA_PCI:
1772 pci_attr = (struct vme_dma_pci *)dest->private;
1773
1774 reg_split((unsigned long long)pci_attr->address, &address_high,
1775 &address_low);
1776 entry->descriptor.ddau = address_high;
1777 entry->descriptor.ddal = address_low;
1778 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_PCI;
1779 break;
1780 case VME_DMA_VME:
1781 vme_attr = (struct vme_dma_vme *)dest->private;
1782
1783 reg_split((unsigned long long)vme_attr->address, &address_high,
1784 &address_low);
1785 entry->descriptor.ddau = address_high;
1786 entry->descriptor.ddal = address_low;
1787 entry->descriptor.ddat = TSI148_LCSR_DDAT_TYP_VME;
1788
1789 retval = tsi148_dma_set_vme_dest_attributes(
1790 &(entry->descriptor.ddat), vme_attr->aspace,
1791 vme_attr->cycle, vme_attr->dwidth);
1792 if(retval < 0 )
1793 goto err_dest;
1794 break;
1795 default:
1796 printk("Invalid destination type\n");
1797 retval = -EINVAL;
1798 goto err_dest;
1799 break;
1800 }
1801
1802 /* Fill out count */
1803 entry->descriptor.dcnt = (u32)count;
1804
1805 /* Add to list */
1806 list_add_tail(&(entry->list), &(list->entries));
1807
1808 /* Fill out previous descriptors "Next Address" */
1809 if(entry->list.prev != &(list->entries)){
1810 prev = list_entry(entry->list.prev, struct tsi148_dma_entry,
1811 list);
1812 /* We need the bus address for the pointer */
1813 desc_ptr = virt_to_bus(&(entry->descriptor));
1814 reg_split(desc_ptr, &(prev->descriptor.dnlau),
1815 &(prev->descriptor.dnlal));
1816 }
1817
1818 return 0;
1819
1820err_dest:
1821err_source:
1822err_align:
1823 kfree(entry);
1824err_mem:
1825 return retval;
1826}
1827
1828/*
1829 * Check to see if the provided DMA channel is busy.
1830 */
1831static int tsi148_dma_busy(int channel)
1832{
1833 u32 tmp;
1834
1835 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
1836 TSI148_LCSR_OFFSET_DSTA);
1837
1838 if (tmp & TSI148_LCSR_DSTA_BSY)
1839 return 0;
1840 else
1841 return 1;
1842
1843}
1844
1845/*
1846 * Execute a previously generated link list
1847 *
1848 * XXX Need to provide control register configuration.
1849 */
1850int tsi148_dma_list_exec(struct vme_dma_list *list)
1851{
1852 struct vme_dma_resource *ctrlr;
1853 int channel, retval = 0;
1854 struct tsi148_dma_entry *entry;
1855 dma_addr_t bus_addr;
1856 u32 bus_addr_high, bus_addr_low;
1857 u32 val, dctlreg = 0;
1858#if 0
1859 int x;
1860#endif
1861
1862 ctrlr = list->parent;
1863
1864 down(&(ctrlr->sem));
1865
1866 channel = ctrlr->number;
1867
1868 if (! list_empty(&(ctrlr->running))) {
1869 /*
1870 * XXX We have an active DMA transfer and currently haven't
1871 * sorted out the mechanism for "pending" DMA transfers.
1872 * Return busy.
1873 */
1874 /* Need to add to pending here */
1875 up(&(ctrlr->sem));
1876 return -EBUSY;
1877 } else {
1878 list_add(&(list->list), &(ctrlr->running));
1879 }
1880#if 0
1881 /* XXX Still todo */
1882 for (x = 0; x < 8; x++) { /* vme block size */
1883 if ((32 << x) >= vmeDma->maxVmeBlockSize) {
1884 break;
1885 }
1886 }
1887 if (x == 8)
1888 x = 7;
1889 dctlreg |= (x << 12);
1890
1891 for (x = 0; x < 8; x++) { /* pci block size */
1892 if ((32 << x) >= vmeDma->maxPciBlockSize) {
1893 break;
1894 }
1895 }
1896 if (x == 8)
1897 x = 7;
1898 dctlreg |= (x << 4);
1899
1900 if (vmeDma->vmeBackOffTimer) {
1901 for (x = 1; x < 8; x++) { /* vme timer */
1902 if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
1903 break;
1904 }
1905 }
1906 if (x == 8)
1907 x = 7;
1908 dctlreg |= (x << 8);
1909 }
1910
1911 if (vmeDma->pciBackOffTimer) {
1912 for (x = 1; x < 8; x++) { /* pci timer */
1913 if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
1914 break;
1915 }
1916 }
1917 if (x == 8)
1918 x = 7;
1919 dctlreg |= (x << 0);
1920 }
1921#endif
1922
1923 /* Get first bus address and write into registers */
1924 entry = list_first_entry(&(list->entries), struct tsi148_dma_entry,
1925 list);
1926
1927 bus_addr = virt_to_bus(&(entry->descriptor));
1928
1929 up(&(ctrlr->sem));
1930
1931 reg_split(bus_addr, &bus_addr_high, &bus_addr_low);
1932
1933 iowrite32be(bus_addr_high, tsi148_bridge->base +
1934 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAU);
1935 iowrite32be(bus_addr_low, tsi148_bridge->base +
1936 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DNLAL);
1937
1938 /* Start the operation */
1939 iowrite32be(dctlreg | TSI148_LCSR_DCTL_DGO, tsi148_bridge->base +
1940 TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);
1941
1942 wait_event_interruptible(dma_queue[channel], tsi148_dma_busy(channel));
1943 /*
1944 * Read status register, this register is valid until we kick off a
1945 * new transfer.
1946 */
1947 val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
1948 TSI148_LCSR_OFFSET_DSTA);
1949
1950 if (val & TSI148_LCSR_DSTA_VBE) {
1951 printk(KERN_ERR "tsi148: DMA Error. DSTA=%08X\n", val);
1952 retval = -EIO;
1953 }
1954
1955 /* Remove list from running list */
1956 down(&(ctrlr->sem));
1957 list_del(&(list->list));
1958 up(&(ctrlr->sem));
1959
1960 return retval;
1961}
1962
1963/*
1964 * Clean up a previously generated link list
1965 *
1966 * We have a separate function, don't assume that the chain can't be reused.
1967 */
1968int tsi148_dma_list_empty(struct vme_dma_list *list)
1969{
1970 struct list_head *pos, *temp;
1971 struct tsi148_dma_entry *entry;
1972
1973 /* detach and free each entry */
1974 list_for_each_safe(pos, temp, &(list->entries)) {
1975 list_del(pos);
1976 entry = list_entry(pos, struct tsi148_dma_entry, list);
1977 kfree(entry);
1978 }
1979
1980 return (0);
1981}
1982
1983/*
1984 * All 4 location monitors reside at the same base - this is therefore a
1985 * system wide configuration.
1986 *
1987 * This does not enable the LM monitor - that should be done when the first
1988 * callback is attached and disabled when the last callback is removed.
1989 */
1990int tsi148_lm_set(unsigned long long lm_base, vme_address_t aspace,
1991 vme_cycle_t cycle)
1992{
1993 u32 lm_base_high, lm_base_low, lm_ctl = 0;
1994 int i;
1995
1996 /* Get semaphore */
1997 down(&(vme_lm));
1998
1999 /* If we already have a callback attached, we can't move it! */
2000 for (i = 0; i < 4; i++) {
2001 if(lm_callback[i] != NULL) {
2002 up(&(vme_lm));
2003 printk("Location monitor callback attached, can't "
2004 "reset\n");
2005 return -EBUSY;
2006 }
2007 }
2008
2009 switch (aspace) {
2010 case VME_A16:
2011 lm_ctl |= TSI148_LCSR_LMAT_AS_A16;
2012 break;
2013 case VME_A24:
2014 lm_ctl |= TSI148_LCSR_LMAT_AS_A24;
2015 break;
2016 case VME_A32:
2017 lm_ctl |= TSI148_LCSR_LMAT_AS_A32;
2018 break;
2019 case VME_A64:
2020 lm_ctl |= TSI148_LCSR_LMAT_AS_A64;
2021 break;
2022 default:
2023 up(&(vme_lm));
2024 printk("Invalid address space\n");
2025 return -EINVAL;
2026 break;
2027 }
2028
2029 if (cycle & VME_SUPER)
2030 lm_ctl |= TSI148_LCSR_LMAT_SUPR ;
2031 if (cycle & VME_USER)
2032 lm_ctl |= TSI148_LCSR_LMAT_NPRIV;
2033 if (cycle & VME_PROG)
2034 lm_ctl |= TSI148_LCSR_LMAT_PGM;
2035 if (cycle & VME_DATA)
2036 lm_ctl |= TSI148_LCSR_LMAT_DATA;
2037
2038 reg_split(lm_base, &lm_base_high, &lm_base_low);
2039
2040 iowrite32be(lm_base_high, tsi148_bridge->base + TSI148_LCSR_LMBAU);
2041 iowrite32be(lm_base_low, tsi148_bridge->base + TSI148_LCSR_LMBAL);
2042 iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
2043
2044 up(&(vme_lm));
2045
2046 return 0;
2047}
2048
2049/* Get configuration of the callback monitor and return whether it is enabled
2050 * or disabled.
2051 */
2052int tsi148_lm_get(unsigned long long *lm_base, vme_address_t *aspace,
2053 vme_cycle_t *cycle)
2054{
2055 u32 lm_base_high, lm_base_low, lm_ctl, enabled = 0;
2056
2057 /* Get semaphore */
2058 down(&(vme_lm));
2059
2060 lm_base_high = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAU);
2061 lm_base_low = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMBAL);
2062 lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2063
2064 reg_join(lm_base_high, lm_base_low, lm_base);
2065
2066 if (lm_ctl & TSI148_LCSR_LMAT_EN)
2067 enabled = 1;
2068
2069 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A16) {
2070 *aspace |= VME_A16;
2071 }
2072 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A24) {
2073 *aspace |= VME_A24;
2074 }
2075 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A32) {
2076 *aspace |= VME_A32;
2077 }
2078 if ((lm_ctl & TSI148_LCSR_LMAT_AS_M) == TSI148_LCSR_LMAT_AS_A64) {
2079 *aspace |= VME_A64;
2080 }
2081
2082 if (lm_ctl & TSI148_LCSR_LMAT_SUPR)
2083 *cycle |= VME_SUPER;
2084 if (lm_ctl & TSI148_LCSR_LMAT_NPRIV)
2085 *cycle |= VME_USER;
2086 if (lm_ctl & TSI148_LCSR_LMAT_PGM)
2087 *cycle |= VME_PROG;
2088 if (lm_ctl & TSI148_LCSR_LMAT_DATA)
2089 *cycle |= VME_DATA;
2090
2091 up(&(vme_lm));
2092
2093 return enabled;
2094}
2095
2096/*
2097 * Attach a callback to a specific location monitor.
2098 *
2099 * Callback will be passed the monitor triggered.
2100 */
2101int tsi148_lm_attach(int monitor, void (*callback)(int))
2102{
2103 u32 lm_ctl, tmp;
2104
2105 /* Get semaphore */
2106 down(&(vme_lm));
2107
2108 /* Ensure that the location monitor is configured - need PGM or DATA */
2109 lm_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2110 if ((lm_ctl & (TSI148_LCSR_LMAT_PGM | TSI148_LCSR_LMAT_DATA)) == 0) {
2111 up(&(vme_lm));
2112 printk("Location monitor not properly configured\n");
2113 return -EINVAL;
2114 }
2115
2116 /* Check that a callback isn't already attached */
2117 if (lm_callback[monitor] != NULL) {
2118 up(&(vme_lm));
2119 printk("Existing callback attached\n");
2120 return -EBUSY;
2121 }
2122
2123 /* Attach callback */
2124 lm_callback[monitor] = callback;
2125
2126 /* Enable Location Monitor interrupt */
2127 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
2128 tmp |= TSI148_LCSR_INTEN_LMEN[monitor];
2129 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEN);
2130
2131 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
2132 tmp |= TSI148_LCSR_INTEO_LMEO[monitor];
2133 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
2134
2135 /* Ensure that global Location Monitor Enable set */
2136 if ((lm_ctl & TSI148_LCSR_LMAT_EN) == 0) {
2137 lm_ctl |= TSI148_LCSR_LMAT_EN;
2138 iowrite32be(lm_ctl, tsi148_bridge->base + TSI148_LCSR_LMAT);
2139 }
2140
2141 up(&(vme_lm));
2142
2143 return 0;
2144}
2145
2146/*
2147 * Detach a callback function forn a specific location monitor.
2148 */
2149int tsi148_lm_detach(int monitor)
2150{
2151 u32 lm_en, tmp;
2152
2153 /* Get semaphore */
2154 down(&(vme_lm));
2155
2156 /* Disable Location Monitor and ensure previous interrupts are clear */
2157 lm_en = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEN);
2158 lm_en &= ~TSI148_LCSR_INTEN_LMEN[monitor];
2159 iowrite32be(lm_en, tsi148_bridge->base + TSI148_LCSR_INTEN);
2160
2161 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_INTEO);
2162 tmp &= ~TSI148_LCSR_INTEO_LMEO[monitor];
2163 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_INTEO);
2164
2165 iowrite32be(TSI148_LCSR_INTC_LMC[monitor],
2166 tsi148_bridge->base + TSI148_LCSR_INTEO);
2167
2168 /* Detach callback */
2169 lm_callback[monitor] = NULL;
2170
2171 /* If all location monitors disabled, disable global Location Monitor */
2172 if ((lm_en & (TSI148_LCSR_INTS_LM0S | TSI148_LCSR_INTS_LM1S |
2173 TSI148_LCSR_INTS_LM2S | TSI148_LCSR_INTS_LM3S)) == 0) {
2174 tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_LMAT);
2175 tmp &= ~TSI148_LCSR_LMAT_EN;
2176 iowrite32be(tmp, tsi148_bridge->base + TSI148_LCSR_LMAT);
2177 }
2178
2179 up(&(vme_lm));
2180
2181 return 0;
2182}
2183
2184/*
2185 * Determine Geographical Addressing
2186 */
2187int tsi148_slot_get(void)
2188{
2189 u32 slot = 0;
2190
2191 slot = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2192 slot = slot & TSI148_LCSR_VSTAT_GA_M;
2193 return (int)slot;
2194}
2195
/* Module entry point: register the PCI driver with the PCI core. */
static int __init tsi148_init(void)
{
	return pci_register_driver(&tsi148_driver);
}
2200
2201/*
2202 * Configure CR/CSR space
2203 *
2204 * Access to the CR/CSR can be configured at power-up. The location of the
2205 * CR/CSR registers in the CR/CSR address space is determined by the boards
2206 * Auto-ID or Geographic address. This function ensures that the window is
 * enabled at an offset consistent with the board's geographic address.
2208 *
2209 * Each board has a 512kB window, with the highest 4kB being used for the
 * board's registers, this means there is a fixed-length 508kB window which must
2211 * be mapped onto PCI memory.
2212 */
2213static int tsi148_crcsr_init(struct pci_dev *pdev)
2214{
2215 u32 cbar, crat, vstat;
2216 u32 crcsr_bus_high, crcsr_bus_low;
2217 int retval;
2218
2219 /* Allocate mem for CR/CSR image */
2220 crcsr_kernel = pci_alloc_consistent(pdev, VME_CRCSR_BUF_SIZE,
2221 &crcsr_bus);
2222 if (crcsr_kernel == NULL) {
2223 dev_err(&pdev->dev, "Failed to allocate memory for CR/CSR "
2224 "image\n");
2225 return -ENOMEM;
2226 }
2227
2228 memset(crcsr_kernel, 0, VME_CRCSR_BUF_SIZE);
2229
2230 reg_split(crcsr_bus, &crcsr_bus_high, &crcsr_bus_low);
2231
2232 iowrite32be(crcsr_bus_high, tsi148_bridge->base + TSI148_LCSR_CROU);
2233 iowrite32be(crcsr_bus_low, tsi148_bridge->base + TSI148_LCSR_CROL);
2234
2235 /* Ensure that the CR/CSR is configured at the correct offset */
2236 cbar = ioread32be(tsi148_bridge->base + TSI148_CBAR);
2237 cbar = (cbar & TSI148_CRCSR_CBAR_M)>>3;
2238
2239 vstat = tsi148_slot_get();
2240
2241 if (cbar != vstat) {
2242 dev_info(&pdev->dev, "Setting CR/CSR offset\n");
2243 iowrite32be(cbar<<3, tsi148_bridge->base + TSI148_CBAR);
2244 }
2245 dev_info(&pdev->dev, "CR/CSR Offset: %d\n", cbar);
2246
2247 crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
2248 if (crat & TSI148_LCSR_CRAT_EN) {
2249 dev_info(&pdev->dev, "Enabling CR/CSR space\n");
2250 iowrite32be(crat | TSI148_LCSR_CRAT_EN,
2251 tsi148_bridge->base + TSI148_LCSR_CRAT);
2252 } else
2253 dev_info(&pdev->dev, "CR/CSR already enabled\n");
2254
2255 /* If we want flushed, error-checked writes, set up a window
2256 * over the CR/CSR registers. We read from here to safely flush
2257 * through VME writes.
2258 */
2259 if(err_chk) {
2260 retval = tsi148_master_set(flush_image, 1, (vstat * 0x80000),
2261 0x80000, VME_CRCSR, VME_SCT, VME_D16);
2262 if (retval)
2263 dev_err(&pdev->dev, "Configuring flush image failed\n");
2264 }
2265
2266 return 0;
2267
2268}
2269
2270static void tsi148_crcsr_exit(struct pci_dev *pdev)
2271{
2272 u32 crat;
2273
2274 /* Turn off CR/CSR space */
2275 crat = ioread32be(tsi148_bridge->base + TSI148_LCSR_CRAT);
2276 iowrite32be(crat & ~TSI148_LCSR_CRAT_EN,
2277 tsi148_bridge->base + TSI148_LCSR_CRAT);
2278
2279 /* Free image */
2280 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROU);
2281 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CROL);
2282
2283 pci_free_consistent(pdev, VME_CRCSR_BUF_SIZE, crcsr_kernel, crcsr_bus);
2284}
2285
2286static int tsi148_probe(struct pci_dev *pdev, const struct pci_device_id *id)
2287{
2288 int retval, i, master_num;
2289 u32 data;
2290 struct list_head *pos = NULL;
2291 struct vme_master_resource *master_image;
2292 struct vme_slave_resource *slave_image;
2293 struct vme_dma_resource *dma_ctrlr;
2294
2295 /* If we want to support more than one of each bridge, we need to
2296 * dynamically generate this so we get one per device
2297 */
2298 tsi148_bridge = (struct vme_bridge *)kmalloc(sizeof(struct vme_bridge),
2299 GFP_KERNEL);
2300 if (tsi148_bridge == NULL) {
2301 dev_err(&pdev->dev, "Failed to allocate memory for device "
2302 "structure\n");
2303 retval = -ENOMEM;
2304 goto err_struct;
2305 }
2306
2307 memset(tsi148_bridge, 0, sizeof(struct vme_bridge));
2308
2309 /* Enable the device */
2310 retval = pci_enable_device(pdev);
2311 if (retval) {
2312 dev_err(&pdev->dev, "Unable to enable device\n");
2313 goto err_enable;
2314 }
2315
2316 /* Map Registers */
2317 retval = pci_request_regions(pdev, driver_name);
2318 if (retval) {
2319 dev_err(&pdev->dev, "Unable to reserve resources\n");
2320 goto err_resource;
2321 }
2322
2323 /* map registers in BAR 0 */
2324 tsi148_bridge->base = ioremap_nocache(pci_resource_start(pdev, 0), 4096);
2325 if (!tsi148_bridge->base) {
2326 dev_err(&pdev->dev, "Unable to remap CRG region\n");
2327 retval = -EIO;
2328 goto err_remap;
2329 }
2330
2331 /* Check to see if the mapping worked out */
2332 data = ioread32(tsi148_bridge->base + TSI148_PCFS_ID) & 0x0000FFFF;
2333 if (data != PCI_VENDOR_ID_TUNDRA) {
2334 dev_err(&pdev->dev, "CRG region check failed\n");
2335 retval = -EIO;
2336 goto err_test;
2337 }
2338
2339 /* Initialize wait queues & mutual exclusion flags */
2340 /* XXX These need to be moved to the vme_bridge structure */
2341 init_waitqueue_head(&dma_queue[0]);
2342 init_waitqueue_head(&dma_queue[1]);
2343 init_waitqueue_head(&iack_queue);
2344 init_MUTEX(&(vme_int));
2345 init_MUTEX(&(vme_irq));
2346 init_MUTEX(&(vme_rmw));
2347 init_MUTEX(&(vme_lm));
2348
2349 tsi148_bridge->parent = &(pdev->dev);
2350 strcpy(tsi148_bridge->name, driver_name);
2351
2352 /* Setup IRQ */
2353 retval = tsi148_irq_init(tsi148_bridge);
2354 if (retval != 0) {
2355 dev_err(&pdev->dev, "Chip Initialization failed.\n");
2356 goto err_irq;
2357 }
2358
2359 /* If we are going to flush writes, we need to read from the VME bus.
2360 * We need to do this safely, thus we read the devices own CR/CSR
2361 * register. To do this we must set up a window in CR/CSR space and
2362 * hence have one less master window resource available.
2363 */
2364 master_num = TSI148_MAX_MASTER;
2365 if(err_chk){
2366 master_num--;
2367 /* XXX */
2368 flush_image = (struct vme_master_resource *)kmalloc(
2369 sizeof(struct vme_master_resource), GFP_KERNEL);
2370 if (flush_image == NULL) {
2371 dev_err(&pdev->dev, "Failed to allocate memory for "
2372 "flush resource structure\n");
2373 retval = -ENOMEM;
2374 goto err_master;
2375 }
2376 flush_image->parent = tsi148_bridge;
2377 spin_lock_init(&(flush_image->lock));
2378 flush_image->locked = 1;
2379 flush_image->number = master_num;
2380 flush_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2381 VME_A64;
2382 flush_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2383 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2384 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2385 VME_PROG | VME_DATA;
2386 flush_image->width_attr = VME_D16 | VME_D32;
2387 memset(&(flush_image->pci_resource), 0,
2388 sizeof(struct resource));
2389 flush_image->kern_base = NULL;
2390 }
2391
2392 /* Add master windows to list */
2393 INIT_LIST_HEAD(&(tsi148_bridge->master_resources));
2394 for (i = 0; i < master_num; i++) {
2395 master_image = (struct vme_master_resource *)kmalloc(
2396 sizeof(struct vme_master_resource), GFP_KERNEL);
2397 if (master_image == NULL) {
2398 dev_err(&pdev->dev, "Failed to allocate memory for "
2399 "master resource structure\n");
2400 retval = -ENOMEM;
2401 goto err_master;
2402 }
2403 master_image->parent = tsi148_bridge;
2404 spin_lock_init(&(master_image->lock));
2405 master_image->locked = 0;
2406 master_image->number = i;
2407 master_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2408 VME_A64;
2409 master_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2410 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2411 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2412 VME_PROG | VME_DATA;
2413 master_image->width_attr = VME_D16 | VME_D32;
2414 memset(&(master_image->pci_resource), 0,
2415 sizeof(struct resource));
2416 master_image->kern_base = NULL;
2417 list_add_tail(&(master_image->list),
2418 &(tsi148_bridge->master_resources));
2419 }
2420
2421 /* Add slave windows to list */
2422 INIT_LIST_HEAD(&(tsi148_bridge->slave_resources));
2423 for (i = 0; i < TSI148_MAX_SLAVE; i++) {
2424 slave_image = (struct vme_slave_resource *)kmalloc(
2425 sizeof(struct vme_slave_resource), GFP_KERNEL);
2426 if (slave_image == NULL) {
2427 dev_err(&pdev->dev, "Failed to allocate memory for "
2428 "slave resource structure\n");
2429 retval = -ENOMEM;
2430 goto err_slave;
2431 }
2432 slave_image->parent = tsi148_bridge;
2433 init_MUTEX(&(slave_image->sem));
2434 slave_image->locked = 0;
2435 slave_image->number = i;
2436 slave_image->address_attr = VME_A16 | VME_A24 | VME_A32 |
2437 VME_A64 | VME_CRCSR | VME_USER1 | VME_USER2 |
2438 VME_USER3 | VME_USER4;
2439 slave_image->cycle_attr = VME_SCT | VME_BLT | VME_MBLT |
2440 VME_2eVME | VME_2eSST | VME_2eSSTB | VME_2eSST160 |
2441 VME_2eSST267 | VME_2eSST320 | VME_SUPER | VME_USER |
2442 VME_PROG | VME_DATA;
2443 list_add_tail(&(slave_image->list),
2444 &(tsi148_bridge->slave_resources));
2445 }
2446
2447 /* Add dma engines to list */
2448 INIT_LIST_HEAD(&(tsi148_bridge->dma_resources));
2449 for (i = 0; i < TSI148_MAX_DMA; i++) {
2450 dma_ctrlr = (struct vme_dma_resource *)kmalloc(
2451 sizeof(struct vme_dma_resource), GFP_KERNEL);
2452 if (dma_ctrlr == NULL) {
2453 dev_err(&pdev->dev, "Failed to allocate memory for "
2454 "dma resource structure\n");
2455 retval = -ENOMEM;
2456 goto err_dma;
2457 }
2458 dma_ctrlr->parent = tsi148_bridge;
2459 init_MUTEX(&(dma_ctrlr->sem));
2460 dma_ctrlr->locked = 0;
2461 dma_ctrlr->number = i;
2462 INIT_LIST_HEAD(&(dma_ctrlr->pending));
2463 INIT_LIST_HEAD(&(dma_ctrlr->running));
2464 list_add_tail(&(dma_ctrlr->list),
2465 &(tsi148_bridge->dma_resources));
2466 }
2467
2468 tsi148_bridge->slave_get = tsi148_slave_get;
2469 tsi148_bridge->slave_set = tsi148_slave_set;
2470 tsi148_bridge->master_get = tsi148_master_get;
2471 tsi148_bridge->master_set = tsi148_master_set;
2472 tsi148_bridge->master_read = tsi148_master_read;
2473 tsi148_bridge->master_write = tsi148_master_write;
2474 tsi148_bridge->master_rmw = tsi148_master_rmw;
2475 tsi148_bridge->dma_list_add = tsi148_dma_list_add;
2476 tsi148_bridge->dma_list_exec = tsi148_dma_list_exec;
2477 tsi148_bridge->dma_list_empty = tsi148_dma_list_empty;
2478 tsi148_bridge->request_irq = tsi148_request_irq;
2479 tsi148_bridge->free_irq = tsi148_free_irq;
2480 tsi148_bridge->generate_irq = tsi148_generate_irq;
2481 tsi148_bridge->lm_set = tsi148_lm_set;
2482 tsi148_bridge->lm_get = tsi148_lm_get;
2483 tsi148_bridge->lm_attach = tsi148_lm_attach;
2484 tsi148_bridge->lm_detach = tsi148_lm_detach;
2485 tsi148_bridge->slot_get = tsi148_slot_get;
2486
2487 data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2488 dev_info(&pdev->dev, "Board is%s the VME system controller\n",
2489 (data & TSI148_LCSR_VSTAT_SCONS)? "" : " not");
2490 dev_info(&pdev->dev, "VME geographical address is %d\n",
2491 data & TSI148_LCSR_VSTAT_GA_M);
2492 dev_info(&pdev->dev, "VME Write and flush and error check is %s\n",
2493 err_chk ? "enabled" : "disabled");
2494
2495 if(tsi148_crcsr_init(pdev)) {
2496 dev_err(&pdev->dev, "CR/CSR configuration failed.\n");
2497 goto err_crcsr;
2498
2499 }
2500
2501 /* Need to save tsi148_bridge pointer locally in link list for use in
2502 * tsi148_remove()
2503 */
2504 retval = vme_register_bridge(tsi148_bridge);
2505 if (retval != 0) {
2506 dev_err(&pdev->dev, "Chip Registration failed.\n");
2507 goto err_reg;
2508 }
2509
2510 /* Clear VME bus "board fail", and "power-up reset" lines */
2511 data = ioread32be(tsi148_bridge->base + TSI148_LCSR_VSTAT);
2512 data &= ~TSI148_LCSR_VSTAT_BRDFL;
2513 data |= TSI148_LCSR_VSTAT_CPURST;
2514 iowrite32be(data, tsi148_bridge->base + TSI148_LCSR_VSTAT);
2515
2516 return 0;
2517
2518 vme_unregister_bridge(tsi148_bridge);
2519err_reg:
2520 tsi148_crcsr_exit(pdev);
2521err_crcsr:
2522err_dma:
2523 /* resources are stored in link list */
2524 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2525 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2526 list_del(pos);
2527 kfree(dma_ctrlr);
2528 }
2529err_slave:
2530 /* resources are stored in link list */
2531 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2532 slave_image = list_entry(pos, struct vme_slave_resource, list);
2533 list_del(pos);
2534 kfree(slave_image);
2535 }
2536err_master:
2537 /* resources are stored in link list */
2538 list_for_each(pos, &(tsi148_bridge->master_resources)) {
2539 master_image = list_entry(pos, struct vme_master_resource, list);
2540 list_del(pos);
2541 kfree(master_image);
2542 }
2543
2544 tsi148_irq_exit(pdev);
2545err_irq:
2546err_test:
2547 iounmap(tsi148_bridge->base);
2548err_remap:
2549 pci_release_regions(pdev);
2550err_resource:
2551 pci_disable_device(pdev);
2552err_enable:
2553 kfree(tsi148_bridge);
2554err_struct:
2555 return retval;
2556
2557}
2558
2559static void tsi148_remove(struct pci_dev *pdev)
2560{
2561 struct list_head *pos = NULL;
2562 struct vme_master_resource *master_image;
2563 struct vme_slave_resource *slave_image;
2564 struct vme_dma_resource *dma_ctrlr;
2565 int i;
2566
2567 dev_dbg(&pdev->dev, "Driver is being unloaded.\n");
2568
2569 /* XXX We need to find the pdev->dev in the list of vme_bridge->dev's */
2570
2571 /*
2572 * Shutdown all inbound and outbound windows.
2573 */
2574 for (i = 0; i < 8; i++) {
2575 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_IT[i] +
2576 TSI148_LCSR_OFFSET_ITAT);
2577 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_OT[i] +
2578 TSI148_LCSR_OFFSET_OTAT);
2579 }
2580
2581 /*
2582 * Shutdown Location monitor.
2583 */
2584 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_LMAT);
2585
2586 /*
2587 * Shutdown CRG map.
2588 */
2589 iowrite32be(0, tsi148_bridge->base + TSI148_LCSR_CSRAT);
2590
2591 /*
2592 * Clear error status.
2593 */
2594 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_EDPAT);
2595 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_VEAT);
2596 iowrite32be(0x07000700, tsi148_bridge->base + TSI148_LCSR_PSTAT);
2597
2598 /*
2599 * Remove VIRQ interrupt (if any)
2600 */
2601 if (ioread32be(tsi148_bridge->base + TSI148_LCSR_VICR) & 0x800) {
2602 iowrite32be(0x8000, tsi148_bridge->base + TSI148_LCSR_VICR);
2603 }
2604
2605 /*
2606 * Disable and clear all interrupts.
2607 */
2608 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTEO);
2609 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTC);
2610 iowrite32be(0xFFFFFFFF, tsi148_bridge->base + TSI148_LCSR_INTEN);
2611
2612 /*
2613 * Map all Interrupts to PCI INTA
2614 */
2615 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM1);
2616 iowrite32be(0x0, tsi148_bridge->base + TSI148_LCSR_INTM2);
2617
2618 tsi148_irq_exit(pdev);
2619
2620 vme_unregister_bridge(tsi148_bridge);
2621
2622 tsi148_crcsr_exit(pdev);
2623
2624 /* resources are stored in link list */
2625 list_for_each(pos, &(tsi148_bridge->dma_resources)) {
2626 dma_ctrlr = list_entry(pos, struct vme_dma_resource, list);
2627 list_del(pos);
2628 kfree(dma_ctrlr);
2629 }
2630
2631 /* resources are stored in link list */
2632 list_for_each(pos, &(tsi148_bridge->slave_resources)) {
2633 slave_image = list_entry(pos, struct vme_slave_resource, list);
2634 list_del(pos);
2635 kfree(slave_image);
2636 }
2637
2638 /* resources are stored in link list */
2639 list_for_each(pos, &(tsi148_bridge->master_resources)) {
2640 master_image = list_entry(pos, struct vme_master_resource, list);
2641 list_del(pos);
2642 kfree(master_image);
2643 }
2644
2645 tsi148_irq_exit(pdev);
2646
2647 iounmap(tsi148_bridge->base);
2648
2649 pci_release_regions(pdev);
2650
2651 pci_disable_device(pdev);
2652
2653 kfree(tsi148_bridge);
2654}
2655
/* Module exit point: unregister the PCI driver from the PCI core. */
static void __exit tsi148_exit(void)
{
	pci_unregister_driver(&tsi148_driver);

	printk(KERN_DEBUG "Driver removed.\n");
}
2662
/* Module parameter: enable flushed, error-checked VME reads/writes
 * (costs one master window - see tsi148_probe()). */
MODULE_PARM_DESC(err_chk, "Check for VME errors on reads and writes");
module_param(err_chk, bool, 0);

MODULE_DESCRIPTION("VME driver for the Tundra Tempe VME bridge");
MODULE_LICENSE("GPL");

module_init(tsi148_init);
module_exit(tsi148_exit);
2671
2672/*----------------------------------------------------------------------------
2673 * STAGING
2674 *--------------------------------------------------------------------------*/
2675
2676#if 0
2677/*
2678 * Direct Mode DMA transfer
2679 *
2680 * XXX Not looking at direct mode for now, we can always use link list mode
2681 * with a single entry.
2682 */
/* Direct-mode DMA transfer (disabled under #if 0).
 * NOTE(review): this code would not compile as-is: 'vmeDma' is never
 * declared, and 'channel'/'dmaLL' are used uninitialized. Kept only as
 * staging reference material.
 */
int tsi148_dma_run(struct vme_dma_resource *resource, struct vme_dma_attr src,
	struct vme_dma_attr dest, size_t count)
{
	u32 dctlreg = 0;
	unsigned int tmp;
	int val;
	int channel, x;
	struct vmeDmaPacket *cur_dma;
	struct tsi148_dma_descriptor *dmaLL;

	/* direct mode */
	dctlreg = 0x800000;

	/* Encode the smallest power-of-two block size >= the requested
	 * maximum into a 3-bit field (32 << x bytes, capped at x == 7). */
	for (x = 0; x < 8; x++) {	/* vme block size */
		if ((32 << x) >= vmeDma->maxVmeBlockSize) {
			break;
		}
	}
	if (x == 8)
		x = 7;
	dctlreg |= (x << 12);

	for (x = 0; x < 8; x++) {	/* pci block size */
		if ((32 << x) >= vmeDma->maxPciBlockSize) {
			break;
		}
	}
	if (x == 8)
		x = 7;
	dctlreg |= (x << 4);

	/* Back-off timers are encoded the same way: 1 << (x - 1) units. */
	if (vmeDma->vmeBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* vme timer */
			if ((1 << (x - 1)) >= vmeDma->vmeBackOffTimer) {
				break;
			}
		}
		if (x == 8)
			x = 7;
		dctlreg |= (x << 8);
	}

	if (vmeDma->pciBackOffTimer) {
		for (x = 1; x < 8; x++) {	/* pci timer */
			if ((1 << (x - 1)) >= vmeDma->pciBackOffTimer) {
				break;
			}
		}
		if (x == 8)
			x = 7;
		dctlreg |= (x << 0);
	}

	/* Program registers for DMA transfer */
	iowrite32be(dmaLL->dsau, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAU);
	iowrite32be(dmaLL->dsal, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAL);
	iowrite32be(dmaLL->ddau, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAU);
	iowrite32be(dmaLL->ddal, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAL);
	iowrite32be(dmaLL->dsat, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DSAT);
	iowrite32be(dmaLL->ddat, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDAT);
	iowrite32be(dmaLL->dcnt, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCNT);
	iowrite32be(dmaLL->ddbs, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DDBS);

	/* Start the operation */
	iowrite32be(dctlreg | 0x2000000, tsi148_bridge->base +
		TSI148_LCSR_DMA[channel] + TSI148_LCSR_OFFSET_DCTL);

	/* NOTE(review): 'tmp' is read once before the wait - the wakeup
	 * condition never sees a fresh DSTA value; looks racy, confirm
	 * against the interrupt handler before reviving this code. */
	tmp = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);
	wait_event_interruptible(dma_queue[channel], (tmp & 0x1000000) == 0);

	/*
	 * Read status register, we should probably do this in some error
	 * handler rather than here so that we can be sure we haven't kicked off
	 * another DMA transfer.
	 */
	val = ioread32be(tsi148_bridge->base + TSI148_LCSR_DMA[channel] +
		TSI148_LCSR_OFFSET_DSTA);

	vmeDma->vmeDmaStatus = 0;
	if (val & 0x10000000) {
		printk(KERN_ERR
			"DMA Error in DMA_tempe_irqhandler DSTA=%08X\n",
			val);
		vmeDma->vmeDmaStatus = val;

	}
	return (0);
}
2780#endif
2781
2782#if 0
2783
2784/* Global VME controller information */
2785struct pci_dev *vme_pci_dev;
2786
2787/*
2788 * Set the VME bus arbiter with the requested attributes
2789 */
/* Program the VME bus arbiter from the requested configuration
 * (disabled under #if 0; legacy vmeArbiterCfg_t interface). */
int tempe_set_arbiter(vmeArbiterCfg_t * vmeArb)
{
	int temp_ctl = 0;
	int gto = 0;

	/* Preserve unrelated VCTRL bits; clear the fields written below */
	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);
	temp_ctl &= 0xFFEFFF00;

	/* Encode the global timeout into the 4-bit GTO field:
	 * 0xFFFFFFFF selects the maximum (8), 0 disables, otherwise the
	 * smallest gto with 16 * 2^(gto-1) >= requested value. */
	if (vmeArb->globalTimeoutTimer == 0xFFFFFFFF) {
		gto = 8;
	} else if (vmeArb->globalTimeoutTimer > 2048) {
		return (-EINVAL);
	} else if (vmeArb->globalTimeoutTimer == 0) {
		gto = 0;
	} else {
		gto = 1;
		while ((16 * (1 << (gto - 1))) < vmeArb->globalTimeoutTimer) {
			gto += 1;
		}
	}
	temp_ctl |= gto;

	/* Bit 6: round-robin (set) vs priority (clear) arbitration */
	if (vmeArb->arbiterMode != VME_PRIORITY_MODE) {
		temp_ctl |= 1 << 6;
	}

	/* Bit 7: arbiter timeout enable */
	if (vmeArb->arbiterTimeoutFlag) {
		temp_ctl |= 1 << 7;
	}

	/* Bit 20: suppress early release of the bus */
	if (vmeArb->noEarlyReleaseFlag) {
		temp_ctl |= 1 << 20;
	}
	iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VCTRL);

	return (0);
}
2827
2828/*
2829 * Return the attributes of the VME bus arbiter.
2830 */
/* Read back the VME bus arbiter configuration from VCTRL
 * (disabled under #if 0; inverse of tempe_set_arbiter()). */
int tempe_get_arbiter(vmeArbiterCfg_t * vmeArb)
{
	int temp_ctl = 0;
	int gto = 0;


	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VCTRL);

	/* Decode the 4-bit GTO field back into 16 * 2^(gto-1);
	 * note gto == 0 leaves globalTimeoutTimer untouched. */
	gto = temp_ctl & 0xF;
	if (gto != 0) {
		vmeArb->globalTimeoutTimer = (16 * (1 << (gto - 1)));
	}

	/* Bit 6 selects round-robin over priority arbitration */
	if (temp_ctl & (1 << 6)) {
		vmeArb->arbiterMode = VME_R_ROBIN_MODE;
	} else {
		vmeArb->arbiterMode = VME_PRIORITY_MODE;
	}

	/* NOTE(review): flags are only ever set, never cleared - assumes
	 * the caller zero-initializes *vmeArb; confirm. */
	if (temp_ctl & (1 << 7)) {
		vmeArb->arbiterTimeoutFlag = 1;
	}

	if (temp_ctl & (1 << 20)) {
		vmeArb->noEarlyReleaseFlag = 1;
	}

	return (0);
}
2860
2861/*
2862 * Set the VME bus requestor with the requested attributes
2863 */
/* Program the VME bus requestor from the requested configuration
 * (disabled under #if 0; legacy vmeRequesterCfg_t interface). */
int tempe_set_requestor(vmeRequesterCfg_t * vmeReq)
{
	int temp_ctl = 0;

	/* Preserve the upper VMCTRL bits; clear the fields written below */
	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);
	temp_ctl &= 0xFFFF0000;

	/* Bit 3: release mode */
	if (vmeReq->releaseMode == 1) {
		temp_ctl |= (1 << 3);
	}

	/* Bit 2: fair request mode */
	if (vmeReq->fairMode == 1) {
		temp_ctl |= (1 << 2);
	}

	/* 3-bit time-on/time-off timer fields and the request level */
	temp_ctl |= (vmeReq->timeonTimeoutTimer & 7) << 8;
	temp_ctl |= (vmeReq->timeoffTimeoutTimer & 7) << 12;
	temp_ctl |= vmeReq->requestLevel;

	iowrite32be(temp_ctl, tsi148_bridge->base + TSI148_LCSR_VMCTRL);
	return (0);
}
2886
2887/*
2888 * Return the attributes of the VME bus requestor
2889 */
/* Read back the VME bus requestor configuration from VMCTRL
 * (disabled under #if 0; inverse of tempe_set_requestor()).
 * NOTE(review): the set path writes bit 3 for release mode but this
 * reads mask 0x18, and requestLevel is masked with 3 here versus an
 * unmasked OR in the set path - the two functions do not round-trip
 * exactly; confirm against the TSI148 VMCTRL register layout. */
int tempe_get_requestor(vmeRequesterCfg_t * vmeReq)
{
	int temp_ctl = 0;

	temp_ctl = ioread32be(tsi148_bridge->base + TSI148_LCSR_VMCTRL);

	if (temp_ctl & 0x18) {
		vmeReq->releaseMode = 1;
	}

	if (temp_ctl & (1 << 2)) {
		vmeReq->fairMode = 1;
	}

	vmeReq->requestLevel = temp_ctl & 3;
	vmeReq->timeonTimeoutTimer = (temp_ctl >> 8) & 7;
	vmeReq->timeoffTimeoutTimer = (temp_ctl >> 12) & 7;

	return (0);
}
2910
2911
2912#endif