/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <asm/sizes.h>

#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h)	(h & 0x1f)
#define _INTC_WIDTH(h)	((h >> 5) & 0xf)
#define _INTC_FN(h)	((h >> 9) & 0xf)
#define _INTC_MODE(h)	((h >> 13) & 0x7)
#define _INTC_ADDR_E(h)	((h >> 16) & 0xff)
#define _INTC_ADDR_D(h)	((h >> 24) & 0xff)

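/*
 * For reference, the handle word packed by _INTC_MK() and unpacked by
 * the accessors above looks like this:
 *
 *	[31:24] addr_d - index of the "disable" register in d->reg[]
 *	[23:16] addr_e - index of the "enable" register in d->reg[]
 *	[15:13] mode   - MODE_* enable/disable strategy
 *	[12:9]  fn     - REG_FN_* accessor (base + width offset)
 *	[8:5]   width  - field width in bits
 *	[4:0]   shift  - field position within the register
 */
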
struct intc_handle_int {
	unsigned int irq;
	unsigned long handle;
};

struct intc_window {
	phys_addr_t phys;
	void __iomem *virt;
	unsigned long size;
};

struct intc_map_entry {
	intc_enum enum_id;
	struct intc_desc_int *desc;
};

struct intc_subgroup_entry {
	unsigned int pirq;
	intc_enum enum_id;
	unsigned long handle;
};

struct intc_desc_int {
	struct list_head list;
	struct sys_device sysdev;
	struct radix_tree_root tree;
	pm_message_t state;
	spinlock_t lock;
	unsigned int index;
	unsigned long *reg;
#ifdef CONFIG_SMP
	unsigned long *smp;
#endif
	unsigned int nr_reg;
	struct intc_handle_int *prio;
	unsigned int nr_prio;
	struct intc_handle_int *sense;
	unsigned int nr_sense;
	struct intc_window *window;
	unsigned int nr_windows;
	struct irq_chip chip;
};

static LIST_HEAD(intc_list);
static unsigned int nr_intc_controllers;

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This combined with
 * sparseirq makes it quite trivial to keep the vector map tightly packed
 * when dynamically creating IRQs, as well as tying in to otherwise
 * unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static struct intc_map_entry intc_irq_xlate[NR_IRQS];
static DEFINE_SPINLOCK(vector_lock);
static DEFINE_SPINLOCK(xlate_lock);

#ifdef CONFIG_SMP
#define IS_SMP(x)		x.smp
#define INTC_REG(d, x, c)	(d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x)		((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x)		0
#define INTC_REG(d, x, c)	(d->reg[(x)])
#define SMP_NR(d, x)		1
#endif

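/*
 * Note on the d->smp[] encoding used above (derived from the macros):
 * the low 8 bits hold the per-CPU address stride that INTC_REG() scales
 * by the cpu argument, and the upper bits hold the number of per-CPU
 * register copies, with 0 meaning a single shared register (SMP_NR == 1).
 */
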
static unsigned int intc_prio_level[NR_IRQS];	/* for now */
static unsigned int default_prio_level = 2;	/* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
#ifdef CONFIG_INTC_BALANCING
static unsigned long dist_handle[NR_IRQS];
#endif

struct intc_virq_list {
	unsigned int irq;
	struct intc_virq_list *next;
};

#define for_each_virq(entry, head) \
	for (entry = head; entry; entry = entry->next)

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
	struct irq_chip *chip = get_irq_chip(irq);

	return container_of(chip, struct intc_desc_int, chip);
}

static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
	generic_handle_irq((unsigned int)get_irq_data(irq));
}

static inline void activate_irq(int irq)
{
#ifdef CONFIG_ARM
	/*
	 * ARM requires an extra step to clear IRQ_NOREQUEST, which it
	 * sets on behalf of every irq_chip. Also sets IRQ_NOPROBE.
	 */
	set_irq_flags(irq, IRQF_VALID);
#else
	/* same effect on other architectures */
	set_irq_noprobe(irq);
#endif
}

static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
				       unsigned long address)
{
	struct intc_window *window;
	int k;

	/* scan through physical windows and convert address */
	for (k = 0; k < d->nr_windows; k++) {
		window = d->window + k;

		if (address < window->phys)
			continue;

		if (address >= (window->phys + window->size))
			continue;

		address -= window->phys;
		address += (unsigned long)window->virt;

		return address;
	}

	/* no windows defined, register must be 1:1 mapped virt:phys */
	return address;
}

static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
{
	unsigned int k;

	address = intc_phys_to_virt(d, address);

	for (k = 0; k < d->nr_reg; k++) {
		if (d->reg[k] == address)
			return k;
	}

	BUG();
	return 0;
}

static inline unsigned int set_field(unsigned int value,
				     unsigned int field_value,
				     unsigned int handle)
{
	unsigned int width = _INTC_WIDTH(handle);
	unsigned int shift = _INTC_SHIFT(handle);

	value &= ~(((1 << width) - 1) << shift);
	value |= field_value << shift;
	return value;
}

static inline unsigned long get_field(unsigned int value, unsigned int handle)
{
	unsigned int width = _INTC_WIDTH(handle);
	unsigned int shift = _INTC_SHIFT(handle);
	unsigned int mask = ((1 << width) - 1) << shift;

	return (value & mask) >> shift;
}

static unsigned long test_8(unsigned long addr, unsigned long h,
			    unsigned long ignore)
{
	return get_field(__raw_readb(addr), h);
}

static unsigned long test_16(unsigned long addr, unsigned long h,
			     unsigned long ignore)
{
	return get_field(__raw_readw(addr), h);
}

static unsigned long test_32(unsigned long addr, unsigned long h,
			     unsigned long ignore)
{
	return get_field(__raw_readl(addr), h);
}

static unsigned long write_8(unsigned long addr, unsigned long h,
			     unsigned long data)
{
	__raw_writeb(set_field(0, data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	return 0;
}

static unsigned long write_16(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	__raw_writew(set_field(0, data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	return 0;
}

static unsigned long write_32(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	__raw_writel(set_field(0, data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	return 0;
}

static unsigned long modify_8(unsigned long addr, unsigned long h,
			      unsigned long data)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_writeb(set_field(__raw_readb(addr), data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}

static unsigned long modify_16(unsigned long addr, unsigned long h,
			       unsigned long data)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_writew(set_field(__raw_readw(addr), data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}

static unsigned long modify_32(unsigned long addr, unsigned long h,
			       unsigned long data)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_writel(set_field(__raw_readl(addr), data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	local_irq_restore(flags);
	return 0;
}

enum {
	REG_FN_ERR = 0,
	REG_FN_TEST_BASE = 1,
	REG_FN_WRITE_BASE = 5,
	REG_FN_MODIFY_BASE = 9
};

static unsigned long (*intc_reg_fns[])(unsigned long addr,
				       unsigned long h,
				       unsigned long data) = {
	[REG_FN_TEST_BASE + 0] = test_8,
	[REG_FN_TEST_BASE + 1] = test_16,
	[REG_FN_TEST_BASE + 3] = test_32,
	[REG_FN_WRITE_BASE + 0] = write_8,
	[REG_FN_WRITE_BASE + 1] = write_16,
	[REG_FN_WRITE_BASE + 3] = write_32,
	[REG_FN_MODIFY_BASE + 0] = modify_8,
	[REG_FN_MODIFY_BASE + 1] = modify_16,
	[REG_FN_MODIFY_BASE + 3] = modify_32,
};

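/*
 * Accessors are picked as REG_FN_*_BASE + (reg_width >> 3) - 1, so an
 * 8-bit register adds 0, a 16-bit register adds 1 and a 32-bit register
 * adds 3; the "+ 2" slots (which a 24-bit register would select) are
 * intentionally left unpopulated.
 */
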
enum {	MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
	MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
	MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
	MODE_PRIO_REG,       /* Priority value written to enable interrupt */
	MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static unsigned long intc_mode_field(unsigned long addr,
				     unsigned long handle,
				     unsigned long (*fn)(unsigned long,
							 unsigned long,
							 unsigned long),
				     unsigned int irq)
{
	return fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static unsigned long intc_mode_zero(unsigned long addr,
				    unsigned long handle,
				    unsigned long (*fn)(unsigned long,
							unsigned long,
							unsigned long),
				    unsigned int irq)
{
	return fn(addr, handle, 0);
}

static unsigned long intc_mode_prio(unsigned long addr,
				    unsigned long handle,
				    unsigned long (*fn)(unsigned long,
							unsigned long,
							unsigned long),
				    unsigned int irq)
{
	return fn(addr, handle, intc_prio_level[irq]);
}

static unsigned long (*intc_enable_fns[])(unsigned long addr,
					  unsigned long handle,
					  unsigned long (*fn)(unsigned long,
							      unsigned long,
							      unsigned long),
					  unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_prio,
	[MODE_PCLR_REG] = intc_mode_prio,
};

static unsigned long (*intc_disable_fns[])(unsigned long addr,
					   unsigned long handle,
					   unsigned long (*fn)(unsigned long,
							       unsigned long,
							       unsigned long),
					   unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_zero,
	[MODE_MASK_REG] = intc_mode_field,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_zero,
	[MODE_PCLR_REG] = intc_mode_field,
};

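/*
 * The two tables above pair each MODE_* register type with the value
 * writer implementing "enable" and "disable" for it: enabling a
 * MODE_MASK_REG source writes zeroes into its field (intc_mode_zero)
 * while disabling writes all ones (intc_mode_field), and MODE_ENABLE_REG
 * is exactly the other way around.
 */
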
#ifdef CONFIG_INTC_BALANCING
static inline void intc_balancing_enable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];
	unsigned long addr;

	if (irq_balancing_disabled(irq) || !handle)
		return;

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
}

static inline void intc_balancing_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];
	unsigned long addr;

	if (irq_balancing_disabled(irq) || !handle)
		return;

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
}

static unsigned int intc_dist_data(struct intc_desc *desc,
				   struct intc_desc_int *d,
				   intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
		mr = desc->hw.mask_regs + i;

		/*
		 * Skip this entry if there's no auto-distribution
		 * register associated with it.
		 */
		if (!mr->dist_reg)
			continue;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->dist_reg;
			reg_d = mr->dist_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	/*
	 * It's possible we've gotten here with no distribution options
	 * available for the IRQ in question, so we just skip over those.
	 */
	return 0;
}
#else
static inline void intc_balancing_enable(unsigned int irq)
{
}

static inline void intc_balancing_disable(unsigned int irq)
{
}
#endif

static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
						    [_INTC_FN(handle)], irq);
	}

	intc_balancing_enable(irq);
}

static void intc_enable(unsigned int irq)
{
	_intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);
	unsigned long addr;
	unsigned int cpu;

	intc_balancing_disable(irq);

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
						     [_INTC_FN(handle)], irq);
	}
}

static unsigned long
(*intc_enable_noprio_fns[])(unsigned long addr,
			    unsigned long handle,
			    unsigned long (*fn)(unsigned long,
						unsigned long,
						unsigned long),
			    unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_field,
	[MODE_PCLR_REG] = intc_mode_field,
};

static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	unsigned long (*fn)(unsigned long, unsigned long,
		   unsigned long (*)(unsigned long, unsigned long,
				     unsigned long),
		   unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}

static int intc_set_wake(unsigned int irq, unsigned int on)
{
	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
}

#ifdef CONFIG_SMP
/*
 * This is called with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask))
		return -1;

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);

	return 0;
}
#endif

static void intc_mask_ack(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = ack_handle[irq];
	unsigned long addr;

	intc_disable(irq);

	/* read register and write zero only to the associated bit */
	if (handle) {
		addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
		switch (_INTC_FN(handle)) {
		case REG_FN_MODIFY_BASE + 0:	/* 8bit */
			__raw_readb(addr);
			__raw_writeb(0xff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 1:	/* 16bit */
			__raw_readw(addr);
			__raw_writew(0xffff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 3:	/* 32bit */
			__raw_readl(addr);
			__raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
			break;
		default:
			BUG();
			break;
		}
	}
}

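/*
 * Worked example for the 8-bit case above, assuming a hypothetical ack
 * bit at position 4: set_field(0, 1, handle) yields 0x10, so
 * 0xff ^ 0x10 == 0xef gets written, i.e. ones everywhere except a zero
 * at the one bit being acked.
 */
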
static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
					     unsigned int nr_hp,
					     unsigned int irq)
{
	int i;

	/*
	 * this doesn't scale well, but...
	 *
	 * this function should only be used for certain uncommon
	 * operations such as intc_set_priority() and intc_set_sense()
	 * and in those rare cases performance doesn't matter that much.
	 * keeping the memory footprint low is more important.
	 *
	 * one rather simple way to speed this up and still keep the
	 * memory footprint down is to make sure the array is sorted
	 * and then perform a bisect to lookup the irq.
	 */
	for (i = 0; i < nr_hp; i++) {
		if ((hp + i)->irq != irq)
			continue;

		return hp + i;
	}

	return NULL;
}

int intc_set_priority(unsigned int irq, unsigned int prio)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	struct intc_handle_int *ihp;

	if (!intc_prio_level[irq] || prio <= 1)
		return -EINVAL;

	ihp = intc_find_irq(d->prio, d->nr_prio, irq);
	if (ihp) {
		if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
			return -EINVAL;

		intc_prio_level[irq] = prio;

		/*
		 * only set secondary masking method directly
		 * primary masking method is using intc_prio_level[irq]
		 * priority level will be set during next enable()
		 */
		if (_INTC_FN(ihp->handle) != REG_FN_ERR)
			_intc_enable(irq, ihp->handle);
	}
	return 0;
}

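/*
 * Usage sketch (hypothetical caller, not from this file): after an IRQ
 * has been registered, its level can be bumped above the default with
 *
 *	intc_set_priority(irq, default_prio_level + 1);
 *
 * Values of 0 and 1 are rejected with -EINVAL, as is anything at or
 * beyond the hardware limit of (1 << _INTC_WIDTH(handle)).
 */
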
#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
	[IRQ_TYPE_EDGE_RISING] = VALID(1),
	[IRQ_TYPE_LEVEL_LOW] = VALID(2),
	/* SH7706, SH7707 and SH7709 do not support high level triggered */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};

static int intc_set_sense(unsigned int irq, unsigned int type)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
	struct intc_handle_int *ihp;
	unsigned long addr;

	if (!value)
		return -EINVAL;

	ihp = intc_find_irq(d->sense, d->nr_sense, irq);
	if (ihp) {
		addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
		intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
	}
	return 0;
}

static intc_enum __init intc_grp_id(struct intc_desc *desc,
				    intc_enum enum_id)
{
	struct intc_group *g = desc->hw.groups;
	unsigned int i, j;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
				continue;

			return g->enum_id;
		}
	}

	return 0;
}

static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					mode = MODE_MASK_REG;
					reg_e = mr->clr_reg;
					reg_d = mr->clr_reg;
				}
			}

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

static unsigned int __init intc_mask_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}

static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
				if (!pr->set_reg)
					BUG();
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

static unsigned int __init intc_prio_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}

static void __init intc_enable_disable_enum(struct intc_desc *desc,
					    struct intc_desc_int *d,
					    intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);

		j++;
	} while (data);
}

static unsigned int __init intc_ack_data(struct intc_desc *desc,
					 struct intc_desc_int *d,
					 intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->set_reg;
			reg_d = mr->set_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	return 0;
}

static unsigned int __init intc_sense_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id)
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}

	return 0;
}

unsigned int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
	struct intc_map_entry *ptr;
	struct intc_desc_int *d;
	unsigned int irq = 0;

	list_for_each_entry(d, &intc_list, list) {
		if (strcmp(d->chip.name, chipname) == 0) {
			ptr = radix_tree_lookup(&d->tree, enum_id);
			if (ptr) {
				irq = ptr - intc_irq_xlate;
				break;
			}
		}
	}

	return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);

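/*
 * Usage sketch (the controller name and enum value below are made up,
 * not from a real platform): board code can resolve an INTC enum back to
 * its Linux IRQ number at runtime, with 0 meaning "no mapping found":
 *
 *	unsigned int irq = intc_irq_lookup("intca", SOME_ENUM_ID);
 *	if (irq)
 *		ret = request_irq(irq, some_handler, 0, "some-dev", NULL);
 */
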
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
	struct intc_virq_list **last, *entry;
	struct irq_desc *desc = irq_to_desc(irq);

	/* scan for duplicates */
	last = (struct intc_virq_list **)&desc->handler_data;
	for_each_virq(entry, desc->handler_data) {
		if (entry->irq == virq)
			return 0;
		last = &entry->next;
	}

	entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
	if (!entry) {
		pr_err("can't allocate VIRQ mapping for %d\n", virq);
		return -ENOMEM;
	}

	entry->irq = virq;

	*last = entry;

	return 0;
}

static void intc_virq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct intc_virq_list *entry, *vlist = get_irq_data(irq);
	struct intc_desc_int *d = get_intc_desc(irq);

	desc->chip->mask_ack(irq);

	for_each_virq(entry, vlist) {
		unsigned long addr, handle;

		handle = (unsigned long)get_irq_data(entry->irq);
		addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);

		if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
			generic_handle_irq(entry->irq);
	}

	desc->chip->unmask(irq);
}

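/*
 * Demux note: the parent IRQ is masked and acked, then every virtual IRQ
 * chained to it samples its subgroup source bit through the REG_FN_TEST_*
 * accessor encoded in its handle (the trailing 0 argument is ignored by
 * the test functions); only children whose bit reads non-zero get handled
 * before the parent is unmasked again.
 */
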
static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
					       struct intc_desc_int *d,
					       unsigned int index)
{
	unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;

	return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
			0, 1, (subgroup->reg_width - 1) - index);
}

#define INTC_TAG_VIRQ_NEEDS_ALLOC	0

static void __init intc_subgroup_init_one(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  struct intc_subgroup *subgroup)
{
	struct intc_map_entry *mapped;
	unsigned int pirq;
	unsigned long flags;
	int i;

	mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
	if (!mapped) {
		WARN_ON(1);
		return;
	}

	pirq = mapped - intc_irq_xlate;

	spin_lock_irqsave(&d->lock, flags);

	for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
		struct intc_subgroup_entry *entry;
		int err;

		if (!subgroup->enum_ids[i])
			continue;

		entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
		if (!entry)
			break;

		entry->pirq = pirq;
		entry->enum_id = subgroup->enum_ids[i];
		entry->handle = intc_subgroup_data(subgroup, d, i);

		err = radix_tree_insert(&d->tree, entry->enum_id, entry);
		if (unlikely(err < 0))
			break;

		radix_tree_tag_set(&d->tree, entry->enum_id,
				   INTC_TAG_VIRQ_NEEDS_ALLOC);
	}

	spin_unlock_irqrestore(&d->lock, flags);
}

static void __init intc_subgroup_init(struct intc_desc *desc,
				      struct intc_desc_int *d)
{
	int i;

	if (!desc->hw.subgroups)
		return;

	for (i = 0; i < desc->hw.nr_subgroups; i++)
		intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
}

static void __init intc_subgroup_map(struct intc_desc_int *d)
{
	struct intc_subgroup_entry *entries[32];
	unsigned long flags;
	unsigned int nr_found;
	int i;

	spin_lock_irqsave(&d->lock, flags);

restart:
	nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
			(void ***)entries, 0, ARRAY_SIZE(entries),
			INTC_TAG_VIRQ_NEEDS_ALLOC);

	for (i = 0; i < nr_found; i++) {
		struct intc_subgroup_entry *entry;
		int irq;

		entry = radix_tree_deref_slot((void **)entries[i]);
		if (unlikely(!entry))
			continue;
		if (unlikely(entry == RADIX_TREE_RETRY))
			goto restart;

		irq = create_irq();
		if (unlikely(irq < 0)) {
			pr_err("no more free IRQs, bailing..\n");
			break;
		}

		pr_info("Setting up a chained VIRQ from %d -> %d\n",
			irq, entry->pirq);

		spin_lock(&xlate_lock);
		intc_irq_xlate[irq].desc = d;
		intc_irq_xlate[irq].enum_id = entry->enum_id;
		spin_unlock(&xlate_lock);

		set_irq_chip_and_handler_name(irq, get_irq_chip(entry->pirq),
					      handle_simple_irq, "virq");
		set_irq_chip_data(irq, get_irq_chip_data(entry->pirq));

		set_irq_data(irq, (void *)entry->handle);

		set_irq_chained_handler(entry->pirq, intc_virq_handler);
		add_virq_to_pirq(entry->pirq, irq);

		radix_tree_tag_clear(&d->tree, entry->enum_id,
				     INTC_TAG_VIRQ_NEEDS_ALLOC);
		radix_tree_replace_slot((void **)entries[i],
					&intc_irq_xlate[irq]);
	}

	spin_unlock_irqrestore(&d->lock, flags);
}

void __init intc_finalize(void)
{
	struct intc_desc_int *d;

	list_for_each_entry(d, &intc_list, list)
		if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
			intc_subgroup_map(d);
}

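/*
 * Subgroup virtual IRQs are thus set up in two phases: registration
 * records each subgroup member in the per-controller radix tree and tags
 * it INTC_TAG_VIRQ_NEEDS_ALLOC, then intc_finalize() (run once the IRQ
 * core is usable) walks the tagged entries, allocates a real IRQ for
 * each one and chains it off its parent via intc_virq_handler().
 */
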
static void __init intc_register_irq(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id,
				     unsigned int irq)
{
	struct intc_handle_int *hp;
	unsigned int data[2], primary;
	unsigned long flags;

	/*
	 * Register the IRQ position with the global IRQ map, then insert
	 * it into the radix tree.
	 */
	set_bit(irq, intc_irq_map);

	spin_lock_irqsave(&xlate_lock, flags);
	radix_tree_insert(&d->tree, enum_id, &intc_irq_xlate[irq]);
	spin_unlock_irqrestore(&xlate_lock, flags);

	/*
	 * Prefer single interrupt source bitmap over other combinations:
	 *
	 * 1. bitmap, single interrupt source
	 * 2. priority, single interrupt source
	 * 3. bitmap, multiple interrupt sources (groups)
	 * 4. priority, multiple interrupt sources (groups)
	 */
	data[0] = intc_mask_data(desc, d, enum_id, 0);
	data[1] = intc_prio_data(desc, d, enum_id, 0);

	primary = 0;
	if (!data[0] && data[1])
		primary = 1;

	if (!data[0] && !data[1])
		pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
			   irq, irq2evt(irq));

	data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
	data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

	if (!data[primary])
		primary ^= 1;

	BUG_ON(!data[primary]); /* must have primary masking method */

	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &d->chip,
				      handle_level_irq, "level");
	set_irq_chip_data(irq, (void *)data[primary]);

	/*
	 * set priority level
	 * - this needs to be at least 2 for 5-bit priorities on 7780
	 */
	intc_prio_level[irq] = default_prio_level;

	/* enable secondary masking method if present */
	if (data[!primary])
		_intc_enable(irq, data[!primary]);

	/* add irq to d->prio list if priority is available */
	if (data[1]) {
		hp = d->prio + d->nr_prio;
		hp->irq = irq;
		hp->handle = data[1];

		if (primary) {
			/*
			 * only secondary priority should access registers, so
			 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
			 */
			hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
			hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
		}
		d->nr_prio++;
	}

	/* add irq to d->sense list if sense is available */
	data[0] = intc_sense_data(desc, d, enum_id);
	if (data[0]) {
		(d->sense + d->nr_sense)->irq = irq;
		(d->sense + d->nr_sense)->handle = data[0];
		d->nr_sense++;
	}

	/* irq should be disabled by default */
	d->chip.mask(irq);

	if (desc->hw.ack_regs)
		ack_handle[irq] = intc_ack_data(desc, d, enum_id);

#ifdef CONFIG_INTC_BALANCING
	if (desc->hw.mask_regs)
		dist_handle[irq] = intc_dist_data(desc, d, enum_id);
#endif

	activate_irq(irq);
}

static unsigned int __init save_reg(struct intc_desc_int *d,
				    unsigned int cnt,
				    unsigned long value,
				    unsigned int smp)
{
	if (value) {
		value = intc_phys_to_virt(d, value);

		d->reg[cnt] = value;
#ifdef CONFIG_SMP
		d->smp[cnt] = smp;
#endif
		return 1;
	}

	return 0;
}

int __init register_intc_controller(struct intc_desc *desc)
{
	unsigned int i, k, smp;
	struct intc_hw_desc *hw = &desc->hw;
	struct intc_desc_int *d;
	struct resource *res;

	pr_info("Registered controller '%s' with %u IRQs\n",
		desc->name, hw->nr_vectors);

	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		goto err0;

	INIT_LIST_HEAD(&d->list);
	list_add_tail(&d->list, &intc_list);

	spin_lock_init(&d->lock);

	d->index = nr_intc_controllers;

	if (desc->num_resources) {
		d->nr_windows = desc->num_resources;
		d->window = kzalloc(d->nr_windows * sizeof(*d->window),
				    GFP_NOWAIT);
		if (!d->window)
			goto err1;

		for (k = 0; k < d->nr_windows; k++) {
			res = desc->resource + k;
			WARN_ON(resource_type(res) != IORESOURCE_MEM);
			d->window[k].phys = res->start;
			d->window[k].size = resource_size(res);
			d->window[k].virt = ioremap_nocache(res->start,
							    resource_size(res));
			if (!d->window[k].virt)
				goto err2;
		}
	}

	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
#ifdef CONFIG_INTC_BALANCING
	if (d->nr_reg)
		d->nr_reg += hw->nr_mask_regs;
#endif
	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;
	d->nr_reg += hw->subgroups ? hw->nr_subgroups : 0;

	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
	if (!d->reg)
		goto err2;

#ifdef CONFIG_SMP
	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
	if (!d->smp)
		goto err3;
#endif
	k = 0;

	if (hw->mask_regs) {
		for (i = 0; i < hw->nr_mask_regs; i++) {
			smp = IS_SMP(hw->mask_regs[i]);
			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
#ifdef CONFIG_INTC_BALANCING
			k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
#endif
		}
	}

	if (hw->prio_regs) {
		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
				  GFP_NOWAIT);
		if (!d->prio)
			goto err4;

		for (i = 0; i < hw->nr_prio_regs; i++) {
			smp = IS_SMP(hw->prio_regs[i]);
			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
		}
	}

	if (hw->sense_regs) {
		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
				   GFP_NOWAIT);
		if (!d->sense)
			goto err5;

		for (i = 0; i < hw->nr_sense_regs; i++)
			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
	}

	if (hw->subgroups)
		for (i = 0; i < hw->nr_subgroups; i++)
			if (hw->subgroups[i].reg)
				k += save_reg(d, k, hw->subgroups[i].reg, 0);

	d->chip.name = desc->name;
	d->chip.mask = intc_disable;
	d->chip.unmask = intc_enable;
	d->chip.mask_ack = intc_disable;
	d->chip.enable = intc_enable;
	d->chip.disable = intc_disable;
	d->chip.shutdown = intc_disable;
	d->chip.set_type = intc_set_sense;
	d->chip.set_wake = intc_set_wake;
#ifdef CONFIG_SMP
	d->chip.set_affinity = intc_set_affinity;
#endif

	if (hw->ack_regs) {
		for (i = 0; i < hw->nr_ack_regs; i++)
			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);

		d->chip.mask_ack = intc_mask_ack;
	}

	/* disable bits matching force_disable before registering irqs */
	if (desc->force_disable)
		intc_enable_disable_enum(desc, d, desc->force_disable, 0);

	/* disable bits matching force_enable before registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 0);

	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

	/* register the vectors one by one */
	for (i = 0; i < hw->nr_vectors; i++) {
		struct intc_vect *vect = hw->vectors + i;
		unsigned int irq = evt2irq(vect->vect);
		unsigned long flags;
		struct irq_desc *irq_desc;

		if (!vect->enum_id)
			continue;

		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			pr_err("can't get irq_desc for %d\n", irq);
			continue;
		}

		spin_lock_irqsave(&xlate_lock, flags);
		intc_irq_xlate[irq].enum_id = vect->enum_id;
		intc_irq_xlate[irq].desc = d;
		spin_unlock_irqrestore(&xlate_lock, flags);

		intc_register_irq(desc, d, vect->enum_id, irq);

		for (k = i + 1; k < hw->nr_vectors; k++) {
			struct intc_vect *vect2 = hw->vectors + k;
			unsigned int irq2 = evt2irq(vect2->vect);

			if (vect->enum_id != vect2->enum_id)
				continue;

			/*
			 * In the case of multi-evt handling and sparse
			 * IRQ support, each vector still needs to have
			 * its own backing irq_desc.
			 */
			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
			if (unlikely(!irq_desc)) {
				pr_err("can't get irq_desc for %d\n", irq2);
				continue;
			}

			vect2->enum_id = 0;

			/* redirect this interrupt to the first one */
			set_irq_chip(irq2, &dummy_irq_chip);
			set_irq_chained_handler(irq2, intc_redirect_irq);
			set_irq_data(irq2, (void *)irq);
		}
	}

	intc_subgroup_init(desc, d);

	/* enable bits matching force_enable after registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 1);

	nr_intc_controllers++;

	return 0;
err5:
	kfree(d->prio);
err4:
#ifdef CONFIG_SMP
	kfree(d->smp);
err3:
#endif
	kfree(d->reg);
err2:
	for (k = 0; k < d->nr_windows; k++)
		if (d->window[k].virt)
			iounmap(d->window[k].virt);

	kfree(d->window);
err1:
	kfree(d);
err0:
	pr_err("unable to allocate INTC memory\n");

	return -ENOMEM;
}

#ifdef CONFIG_INTC_USERIMASK
static void __iomem *uimask;

int register_intc_userimask(unsigned long addr)
{
	if (unlikely(uimask))
		return -EBUSY;

	uimask = ioremap_nocache(addr, SZ_4K);
	if (unlikely(!uimask))
		return -ENOMEM;

	pr_info("userimask support registered for levels 0 -> %d\n",
		default_prio_level - 1);

	return 0;
}

static ssize_t
show_intc_userimask(struct sysdev_class *cls,
		    struct sysdev_class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}

static ssize_t
store_intc_userimask(struct sysdev_class *cls,
		     struct sysdev_class_attribute *attr,
		     const char *buf, size_t count)
{
	unsigned long level;

	level = simple_strtoul(buf, NULL, 10);

	/*
	 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
	 * these are chomped so as to not interfere with normal IRQs.
	 *
	 * Level 1 is a special case on some CPUs in that it's not
	 * directly settable, but given that USERIMASK cuts off below a
	 * certain level, we don't care about this limitation here.
	 * Level 0 on the other hand equates to user masking disabled.
	 *
	 * We use default_prio_level as a cut off so that only special
	 * case opt-in IRQs can be mangled.
	 */
	if (level >= default_prio_level)
		return -EINVAL;

	__raw_writel(0xa5 << 24 | level << 4, uimask);

	return count;
}

static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
			 show_intc_userimask, store_intc_userimask);
#endif

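/*
 * Usage sketch, assuming the usual sysdev layout for the "intc" class
 * registered below (path not verified here):
 *
 *	# echo 1 > /sys/devices/system/intc/userimask
 *
 * writes the level into the USERIMASK register (with the 0xa5 unlock key)
 * so that interrupt levels below the cut-off are held off, while writing
 * 0 disables user masking again (see store_intc_userimask() above).
 */
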
#ifdef CONFIG_INTC_MAPPING_DEBUG
static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
{
	int i;

	seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");

	for (i = 1; i < nr_irqs; i++) {
		struct intc_desc_int *desc = intc_irq_xlate[i].desc;

		if (!desc)
			continue;

		seq_printf(m, "%5d ", i);
		seq_printf(m, "0x%05x ", intc_irq_xlate[i].enum_id);
		seq_printf(m, "%-15s\n", desc->chip.name);
	}

	return 0;
}

static int intc_irq_xlate_open(struct inode *inode, struct file *file)
{
	return single_open(file, intc_irq_xlate_debug, inode->i_private);
}

static const struct file_operations intc_irq_xlate_fops = {
	.open = intc_irq_xlate_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init intc_irq_xlate_init(void)
{
	/*
	 * XXX.. use arch_debugfs_dir here when all of the intc users are
	 * converted.
	 */
	if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
				&intc_irq_xlate_fops) == NULL)
		return -ENOMEM;

	return 0;
}
fs_initcall(intc_irq_xlate_init);
#endif

static ssize_t
show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
{
	struct intc_desc_int *d;

	d = container_of(dev, struct intc_desc_int, sysdev);

	return sprintf(buf, "%s\n", d->chip.name);
}

static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);

static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
	struct intc_desc_int *d;
	struct irq_desc *desc;
	int irq;

	/* get intc controller associated with this sysdev */
	d = container_of(dev, struct intc_desc_int, sysdev);

	switch (state.event) {
	case PM_EVENT_ON:
		if (d->state.event != PM_EVENT_FREEZE)
			break;
		for_each_irq_desc(irq, desc) {
			if (desc->handle_irq == intc_redirect_irq)
				continue;
			if (desc->chip != &d->chip)
				continue;
			if (desc->status & IRQ_DISABLED)
				intc_disable(irq);
			else
				intc_enable(irq);
		}
		break;
	case PM_EVENT_FREEZE:
		/* nothing has to be done */
		break;
	case PM_EVENT_SUSPEND:
		/* enable wakeup irqs belonging to this intc controller */
		for_each_irq_desc(irq, desc) {
			if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
				intc_enable(irq);
		}
		break;
	}
	d->state = state;

	return 0;
}

static int intc_resume(struct sys_device *dev)
{
	return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
	.name		= "intc",
	.suspend	= intc_suspend,
	.resume		= intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
	struct intc_desc_int *d;
	int error;

	error = sysdev_class_register(&intc_sysdev_class);
#ifdef CONFIG_INTC_USERIMASK
	if (!error && uimask)
		error = sysdev_class_create_file(&intc_sysdev_class,
						 &attr_userimask);
#endif
	if (!error) {
		list_for_each_entry(d, &intc_list, list) {
			d->sysdev.id = d->index;
			d->sysdev.cls = &intc_sysdev_class;
			error = sysdev_register(&d->sysdev);
			if (error == 0)
				error = sysdev_create_file(&d->sysdev,
							   &attr_name);
			if (error)
				break;
		}
	}

	if (error)
		pr_err("sysdev registration error\n");

	return error;
}
device_initcall(register_intc_sysdevs);

/*
 * Dynamic IRQ allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	unsigned int irq = 0, new;
	unsigned long flags;
	struct irq_desc *desc;

	spin_lock_irqsave(&vector_lock, flags);

	/*
	 * First try the wanted IRQ
	 */
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
		new = irq_want;
	} else {
		/* .. then fall back to scanning. */
		new = find_first_zero_bit(intc_irq_map, nr_irqs);
		if (unlikely(new == nr_irqs))
			goto out_unlock;

		__set_bit(new, intc_irq_map);
	}

	desc = irq_to_desc_alloc_node(new, node);
	if (unlikely(!desc)) {
		pr_err("can't get irq_desc for %d\n", new);
		goto out_unlock;
	}

	desc = move_irq_desc(desc, node);
	irq = new;

out_unlock:
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0) {
		dynamic_irq_init(irq);
		activate_irq(irq);
	}

	return irq;
}

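/*
 * create_irq() below simply asks for the first free slot at or above
 * NR_IRQS_LEGACY; create_irq_nr() uses 0 as its "no IRQ available"
 * return value, which create_irq() converts to -1 for its callers.
 */
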
int create_irq(void)
{
	int nid = cpu_to_node(smp_processor_id());
	int irq;

	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
	if (irq == 0)
		irq = -1;

	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_bit(irq, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}

int reserve_irq_vector(unsigned int irq)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&vector_lock, flags);
	if (test_and_set_bit(irq, intc_irq_map))
		ret = -EBUSY;
	spin_unlock_irqrestore(&vector_lock, flags);

	return ret;
}

void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
{
	unsigned long flags;
	int i;

	spin_lock_irqsave(&vector_lock, flags);
	for (i = 0; i < nr_vecs; i++)
		__set_bit(evt2irq(vectors[i].vect), intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}

void reserve_irq_legacy(void)
{
	unsigned long flags;
	int i, j;

	spin_lock_irqsave(&vector_lock, flags);
	j = find_first_bit(intc_irq_map, nr_irqs);
	for (i = 0; i < j; i++)
		__set_bit(i, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}