/*
 * Source: arch/arm/mach-ixp4xx/ixp4xx_qmgr.c (from the net-next-2.6 tree);
 * gitweb blame annotations removed.
 */
1/*
2 * Intel IXP4xx Queue Manager driver for Linux
3 *
4 * Copyright (C) 2007 Krzysztof Halasa <khc@pm.waw.pl>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of version 2 of the GNU General Public License
8 * as published by the Free Software Foundation.
9 */
10
11#include <linux/ioport.h>
12#include <linux/interrupt.h>
13#include <linux/kernel.h>
14#include <linux/module.h>
a09e64fb 15#include <mach/qmgr.h>
82a96f57 16
82a96f57
KH
/* Mapped queue manager register block; set up in qmgr_init() */
struct qmgr_regs __iomem *qmgr_regs;
static struct resource *mem_res;	/* claimed QMGR MMIO region */
static spinlock_t qmgr_lock;		/* serializes irqen updates, SRAM
					   bitmap and handler table changes */
static u32 used_sram_bitmap[4]; /* 128 16-dword pages */
/* Per-queue interrupt callback and its opaque argument, installed by
   qmgr_set_irq() and invoked from the IRQ handlers below */
static void (*irq_handlers[QUEUES])(void *pdev);
static void *irq_pdevs[QUEUES];

#if DEBUG_QMGR
/* Human-readable queue descriptions used only in debug printks */
char qmgr_queue_descs[QUEUES][32];
#endif
27
82a96f57
KH
28void qmgr_set_irq(unsigned int queue, int src,
29 void (*handler)(void *pdev), void *pdev)
30{
82a96f57
KH
31 unsigned long flags;
32
82a96f57 33 spin_lock_irqsave(&qmgr_lock, flags);
a6a9fb85
KH
34 if (queue < HALF_QUEUES) {
35 u32 __iomem *reg;
36 int bit;
37 BUG_ON(src > QUEUE_IRQ_SRC_NOT_FULL);
38 reg = &qmgr_regs->irqsrc[queue >> 3]; /* 8 queues per u32 */
39 bit = (queue % 8) * 4; /* 3 bits + 1 reserved bit per queue */
40 __raw_writel((__raw_readl(reg) & ~(7 << bit)) | (src << bit),
41 reg);
42 } else
43 /* IRQ source for queues 32-63 is fixed */
44 BUG_ON(src != QUEUE_IRQ_SRC_NOT_NEARLY_EMPTY);
45
82a96f57
KH
46 irq_handlers[queue] = handler;
47 irq_pdevs[queue] = pdev;
48 spin_unlock_irqrestore(&qmgr_lock, flags);
49}
50
51
d4c9e9fc
KH
52static irqreturn_t qmgr_irq1_a0(int irq, void *pdev)
53{
54 int i, ret = 0;
0771c693 55 u32 en_bitmap, src, stat;
d4c9e9fc
KH
56
57 /* ACK - it may clear any bits so don't rely on it */
58 __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[0]);
59
0771c693
KH
60 en_bitmap = qmgr_regs->irqen[0];
61 while (en_bitmap) {
62 i = __fls(en_bitmap); /* number of the last "low" queue */
63 en_bitmap &= ~BIT(i);
d4c9e9fc
KH
64 src = qmgr_regs->irqsrc[i >> 3];
65 stat = qmgr_regs->stat1[i >> 3];
66 if (src & 4) /* the IRQ condition is inverted */
67 stat = ~stat;
68 if (stat & BIT(src & 3)) {
69 irq_handlers[i](irq_pdevs[i]);
70 ret = IRQ_HANDLED;
71 }
72 }
73 return ret;
74}
75
76
77static irqreturn_t qmgr_irq2_a0(int irq, void *pdev)
78{
79 int i, ret = 0;
80 u32 req_bitmap;
81
82 /* ACK - it may clear any bits so don't rely on it */
83 __raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[1]);
84
85 req_bitmap = qmgr_regs->irqen[1] & qmgr_regs->statne_h;
0771c693
KH
86 while (req_bitmap) {
87 i = __fls(req_bitmap); /* number of the last "high" queue */
88 req_bitmap &= ~BIT(i);
d4c9e9fc
KH
89 irq_handlers[HALF_QUEUES + i](irq_pdevs[HALF_QUEUES + i]);
90 ret = IRQ_HANDLED;
91 }
92 return ret;
93}
94
95
a6a9fb85 96static irqreturn_t qmgr_irq(int irq, void *pdev)
82a96f57 97{
a6a9fb85 98 int i, half = (irq == IRQ_IXP4XX_QM1 ? 0 : 1);
0771c693 99 u32 req_bitmap = __raw_readl(&qmgr_regs->irqstat[half]);
82a96f57 100
0771c693
KH
101 if (!req_bitmap)
102 return 0;
103 __raw_writel(req_bitmap, &qmgr_regs->irqstat[half]); /* ACK */
104
105 while (req_bitmap) {
106 i = __fls(req_bitmap); /* number of the last queue */
107 req_bitmap &= ~BIT(i);
108 i += half * HALF_QUEUES;
109 irq_handlers[i](irq_pdevs[i]);
110 }
111 return IRQ_HANDLED;
82a96f57
KH
112}
113
114
115void qmgr_enable_irq(unsigned int queue)
116{
117 unsigned long flags;
a6a9fb85
KH
118 int half = queue / 32;
119 u32 mask = 1 << (queue & (HALF_QUEUES - 1));
82a96f57
KH
120
121 spin_lock_irqsave(&qmgr_lock, flags);
a6a9fb85
KH
122 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) | mask,
123 &qmgr_regs->irqen[half]);
82a96f57
KH
124 spin_unlock_irqrestore(&qmgr_lock, flags);
125}
126
127void qmgr_disable_irq(unsigned int queue)
128{
129 unsigned long flags;
a6a9fb85
KH
130 int half = queue / 32;
131 u32 mask = 1 << (queue & (HALF_QUEUES - 1));
82a96f57
KH
132
133 spin_lock_irqsave(&qmgr_lock, flags);
a6a9fb85
KH
134 __raw_writel(__raw_readl(&qmgr_regs->irqen[half]) & ~mask,
135 &qmgr_regs->irqen[half]);
136 __raw_writel(mask, &qmgr_regs->irqstat[half]); /* clear */
82a96f57
KH
137 spin_unlock_irqrestore(&qmgr_lock, flags);
138}
139
140static inline void shift_mask(u32 *mask)
141{
142 mask[3] = mask[3] << 1 | mask[2] >> 31;
143 mask[2] = mask[2] << 1 | mask[1] >> 31;
144 mask[1] = mask[1] << 1 | mask[0] >> 31;
145 mask[0] <<= 1;
146}
147
/*
 * Claim hardware queue "queue" and carve out SRAM for its ring buffer.
 *
 * len: ring size in dwords - must be 16, 32, 64 or 128.
 * nearly_empty_watermark / nearly_full_watermark: 3-bit values (0-7)
 *	encoded into the queue's CFG word.
 * With DEBUG_QMGR, desc_format/name additionally build a description for
 * debug printks (presumably a wrapper in qmgr.h maps the public name onto
 * __qmgr_request_queue() in the non-debug build - confirm in the header).
 *
 * Returns 0 on success, -EINVAL for bad parameters, -EBUSY if the queue
 * is already configured, -ENOMEM if no contiguous SRAM region is free,
 * -ENODEV if the module is going away. On success a module reference is
 * held until qmgr_release_queue().
 */
#if DEBUG_QMGR
int qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
		       unsigned int nearly_empty_watermark,
		       unsigned int nearly_full_watermark,
		       const char *desc_format, const char* name)
#else
int __qmgr_request_queue(unsigned int queue, unsigned int len /* dwords */,
			 unsigned int nearly_empty_watermark,
			 unsigned int nearly_full_watermark)
#endif
{
	u32 cfg, addr = 0, mask[4]; /* in 16-dwords */
	int err;

	BUG_ON(queue >= QUEUES);

	/* watermarks are 3-bit fields */
	if ((nearly_empty_watermark | nearly_full_watermark) & ~7)
		return -EINVAL;

	/* CFG bits 25:24 encode the ring size; mask[0] is the matching
	   number of 16-dword SRAM pages */
	switch (len) {
	case 16:
		cfg = 0 << 24;
		mask[0] = 0x1;
		break;
	case 32:
		cfg = 1 << 24;
		mask[0] = 0x3;
		break;
	case 64:
		cfg = 2 << 24;
		mask[0] = 0xF;
		break;
	case 128:
		cfg = 3 << 24;
		mask[0] = 0xFF;
		break;
	default:
		return -EINVAL;
	}

	cfg |= nearly_empty_watermark << 26;
	cfg |= nearly_full_watermark << 29;
	len /= 16; /* in 16-dwords: 1, 2, 4 or 8 */
	mask[1] = mask[2] = mask[3] = 0;

	/* pin the module while the queue (and its IRQ handler) is in use */
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	spin_lock_irq(&qmgr_lock);
	/* a non-zero CFG word means the queue is already configured */
	if (__raw_readl(&qmgr_regs->sram[queue])) {
		err = -EBUSY;
		goto err;
	}

	/* first-fit search: slide the page mask up through SRAM until it
	   lands on an unused region or runs off the end */
	while (1) {
		if (!(used_sram_bitmap[0] & mask[0]) &&
		    !(used_sram_bitmap[1] & mask[1]) &&
		    !(used_sram_bitmap[2] & mask[2]) &&
		    !(used_sram_bitmap[3] & mask[3]))
			break; /* found free space */

		addr++;
		shift_mask(mask);
		if (addr + len > ARRAY_SIZE(qmgr_regs->sram)) {
			printk(KERN_ERR "qmgr: no free SRAM space for"
			       " queue %i\n", queue);
			err = -ENOMEM;
			goto err;
		}
	}

	/* mark the pages used and program the queue's CFG word (SRAM page
	   address lives in bits 21:14) */
	used_sram_bitmap[0] |= mask[0];
	used_sram_bitmap[1] |= mask[1];
	used_sram_bitmap[2] |= mask[2];
	used_sram_bitmap[3] |= mask[3];
	__raw_writel(cfg | (addr << 14), &qmgr_regs->sram[queue]);
#if DEBUG_QMGR
	snprintf(qmgr_queue_descs[queue], sizeof(qmgr_queue_descs[0]),
		 desc_format, name);
	printk(KERN_DEBUG "qmgr: requested queue %s(%i) addr = 0x%02X\n",
	       qmgr_queue_descs[queue], queue, addr);
#endif
	spin_unlock_irq(&qmgr_lock);
	return 0;

err:
	spin_unlock_irq(&qmgr_lock);
	module_put(THIS_MODULE);
	return err;
}
238
/*
 * Release a queue previously obtained with qmgr_request_queue(): clear
 * its CFG word, return its SRAM pages to the free pool, drop the module
 * reference, then drain and report any stale entries (a non-empty queue
 * at release time indicates a bug in the queue's user).
 */
void qmgr_release_queue(unsigned int queue)
{
	u32 cfg, addr, mask[4];

	BUG_ON(queue >= QUEUES); /* not in valid range */

	spin_lock_irq(&qmgr_lock);
	cfg = __raw_readl(&qmgr_regs->sram[queue]);
	addr = (cfg >> 14) & 0xFF; /* SRAM page address from the CFG word */

	BUG_ON(!addr);		/* not requested */

	/* bits 25:24 encode the ring size; rebuild the page mask used at
	   request time (all four cases of the 2-bit field are covered) */
	switch ((cfg >> 24) & 3) {
	case 0: mask[0] = 0x1; break;
	case 1: mask[0] = 0x3; break;
	case 2: mask[0] = 0xF; break;
	case 3: mask[0] = 0xFF; break;
	}

	mask[1] = mask[2] = mask[3] = 0;

	/* slide the mask up to the queue's SRAM position */
	while (addr--)
		shift_mask(mask);

#if DEBUG_QMGR
	printk(KERN_DEBUG "qmgr: releasing queue %s(%i)\n",
	       qmgr_queue_descs[queue], queue);
	qmgr_queue_descs[queue][0] = '\x0';
#endif
	__raw_writel(0, &qmgr_regs->sram[queue]);

	used_sram_bitmap[0] &= ~mask[0];
	used_sram_bitmap[1] &= ~mask[1];
	used_sram_bitmap[2] &= ~mask[2];
	used_sram_bitmap[3] &= ~mask[3];
	irq_handlers[queue] = NULL; /* catch IRQ bugs */
	spin_unlock_irq(&qmgr_lock);

	module_put(THIS_MODULE);

	/* drain leftovers outside the lock, complaining about each entry */
	while ((addr = qmgr_get_entry(queue)))
		printk(KERN_ERR "qmgr: released queue %i not empty: 0x%08X\n",
		       queue, addr);
}
283
/*
 * Module init: claim and map the queue manager MMIO region, reset every
 * register to a known state, then install the IRQ handlers - the slower
 * scanning handlers on IXP42x rev. A0 silicon (unreliable irqstat), the
 * common fast handler otherwise.
 */
static int qmgr_init(void)
{
	int i, err;
	irq_handler_t handler1, handler2;

	mem_res = request_mem_region(IXP4XX_QMGR_BASE_PHYS,
				     IXP4XX_QMGR_REGION_SIZE,
				     "IXP4xx Queue Manager");
	if (mem_res == NULL)
		return -EBUSY;

	qmgr_regs = ioremap(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	if (qmgr_regs == NULL) {
		err = -ENOMEM;
		goto error_map;
	}

	/* reset qmgr registers */
	for (i = 0; i < 4; i++) {
		__raw_writel(0x33333333, &qmgr_regs->stat1[i]);
		__raw_writel(0, &qmgr_regs->irqsrc[i]);
	}
	for (i = 0; i < 2; i++) {
		__raw_writel(0, &qmgr_regs->stat2[i]);
		__raw_writel(0xFFFFFFFF, &qmgr_regs->irqstat[i]); /* clear */
		__raw_writel(0, &qmgr_regs->irqen[i]);
	}

	/* reset values for the high-queue status registers */
	__raw_writel(0xFFFFFFFF, &qmgr_regs->statne_h);
	__raw_writel(0, &qmgr_regs->statf_h);

	/* zero every queue's CFG word - all queues unconfigured */
	for (i = 0; i < QUEUES; i++)
		__raw_writel(0, &qmgr_regs->sram[i]);

	if (cpu_is_ixp42x_rev_a0()) {
		handler1 = qmgr_irq1_a0;
		handler2 = qmgr_irq2_a0;
	} else
		handler1 = handler2 = qmgr_irq;

	err = request_irq(IRQ_IXP4XX_QM1, handler1, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM1, err);
		goto error_irq;
	}

	err = request_irq(IRQ_IXP4XX_QM2, handler2, 0, "IXP4xx Queue Manager",
			  NULL);
	if (err) {
		printk(KERN_ERR "qmgr: failed to request IRQ%i (%i)\n",
		       IRQ_IXP4XX_QM2, err);
		goto error_irq2;
	}

	used_sram_bitmap[0] = 0xF; /* 4 first pages reserved for config */
	spin_lock_init(&qmgr_lock);

	printk(KERN_INFO "IXP4xx Queue Manager initialized.\n");
	return 0;

	/* unwind in reverse order of acquisition */
error_irq2:
	free_irq(IRQ_IXP4XX_QM1, NULL);
error_irq:
	iounmap(qmgr_regs);
error_map:
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
	return err;
}
354
/* Module teardown: release both IRQs, wait for any in-flight handlers to
   finish, then unmap and release the register region. */
static void qmgr_remove(void)
{
	free_irq(IRQ_IXP4XX_QM1, NULL);
	free_irq(IRQ_IXP4XX_QM2, NULL);
	synchronize_irq(IRQ_IXP4XX_QM1);
	synchronize_irq(IRQ_IXP4XX_QM2);
	iounmap(qmgr_regs);
	release_mem_region(IXP4XX_QMGR_BASE_PHYS, IXP4XX_QMGR_REGION_SIZE);
}
364
module_init(qmgr_init);
module_exit(qmgr_remove);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Krzysztof Halasa");

/* Queue manager API exported to other modules (queue users live
   elsewhere in the tree). The DEBUG_QMGR build exports the descriptive
   request variant; the normal build exports __qmgr_request_queue(). */
EXPORT_SYMBOL(qmgr_regs);
EXPORT_SYMBOL(qmgr_set_irq);
EXPORT_SYMBOL(qmgr_enable_irq);
EXPORT_SYMBOL(qmgr_disable_irq);
#if DEBUG_QMGR
EXPORT_SYMBOL(qmgr_queue_descs);
EXPORT_SYMBOL(qmgr_request_queue);
#else
EXPORT_SYMBOL(__qmgr_request_queue);
#endif
EXPORT_SYMBOL(qmgr_release_queue);