drivers/sh/intc.c (from net-next-2.6.git: "sh: intc: Implement reverse mapping for IRQs to per-controller IDs.")

/*
 * Shared interrupt handling code for IPR and INTC2 types of IRQs.
 *
 * Copyright (C) 2007, 2008 Magnus Damm
 * Copyright (C) 2009, 2010 Paul Mundt
 *
 * Based on intc2.c and ipr.c
 *
 * Copyright (C) 1999 Niibe Yutaka & Takeshi Yaegashi
 * Copyright (C) 2000 Kazumoto Kojima
 * Copyright (C) 2001 David J. Mckay (david.mckay@st.com)
 * Copyright (C) 2003 Takashi Kusuda <kusuda-takashi@hitachi-ul.co.jp>
 * Copyright (C) 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/sh_intc.h>
#include <linux/sysdev.h>
#include <linux/list.h>
#include <linux/topology.h>
#include <linux/bitmap.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <asm/sizes.h>

#define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
	 ((addr_e) << 16) | ((addr_d) << 24))

#define _INTC_SHIFT(h) (h & 0x1f)
#define _INTC_WIDTH(h) ((h >> 5) & 0xf)
#define _INTC_FN(h) ((h >> 9) & 0xf)
#define _INTC_MODE(h) ((h >> 13) & 0x7)
#define _INTC_ADDR_E(h) ((h >> 16) & 0xff)
#define _INTC_ADDR_D(h) ((h >> 24) & 0xff)

struct intc_handle_int {
	unsigned int irq;
	unsigned long handle;
};
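
/*
 * Editorial note (added for illustration, not from the original source):
 * a handle packs everything needed to locate one register field into a
 * single unsigned long, mirroring the _INTC_MK()/_INTC_*() macros above:
 *
 *	bits  0.. 4: shift	bits  5.. 8: width	bits  9..12: fn
 *	bits 13..15: mode	bits 16..23: addr_e	bits 24..31: addr_d
 *
 * For example, _INTC_MK(REG_FN_MODIFY_BASE, MODE_ENABLE_REG, 3, 3, 1, 7)
 * describes a 1-bit field at bit 7 of the register kept at index 3 of
 * d->reg[].
 */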

struct intc_window {
	phys_addr_t phys;
	void __iomem *virt;
	unsigned long size;
};

struct intc_map_entry {
	intc_enum enum_id;
	struct intc_desc_int *desc;
};

struct intc_desc_int {
	struct list_head list;
	struct sys_device sysdev;
	struct radix_tree_root tree;
	pm_message_t state;
	unsigned long *reg;
#ifdef CONFIG_SMP
	unsigned long *smp;
#endif
	unsigned int nr_reg;
	struct intc_handle_int *prio;
	unsigned int nr_prio;
	struct intc_handle_int *sense;
	unsigned int nr_sense;
	struct intc_window *window;
	unsigned int nr_windows;
	struct irq_chip chip;
};

static LIST_HEAD(intc_list);

/*
 * The intc_irq_map provides a global map of bound IRQ vectors for a
 * given platform. Allocation of IRQs is either static through the CPU
 * vector map, or dynamic in the case of board mux vectors or MSI.
 *
 * As this is a central point for all IRQ controllers on the system,
 * each of the available sources is mapped out here. This, combined with
 * sparseirq, makes it quite trivial to keep the vector map tightly
 * packed when dynamically creating IRQs, as well as to tie in to
 * otherwise unused irq_desc positions in the sparse array.
 */
static DECLARE_BITMAP(intc_irq_map, NR_IRQS);
static struct intc_map_entry intc_irq_xlate[NR_IRQS];
static DEFINE_SPINLOCK(vector_lock);
static DEFINE_MUTEX(irq_xlate_mutex);

#ifdef CONFIG_SMP
#define IS_SMP(x) x.smp
#define INTC_REG(d, x, c) (d->reg[(x)] + ((d->smp[(x)] & 0xff) * c))
#define SMP_NR(d, x) ((d->smp[(x)] >> 8) ? (d->smp[(x)] >> 8) : 1)
#else
#define IS_SMP(x) 0
#define INTC_REG(d, x, c) (d->reg[(x)])
#define SMP_NR(d, x) 1
#endif

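/*
 * Editorial note, assuming the INTC_SMP() encoding from <linux/sh_intc.h>:
 * each d->smp[] entry keeps the register access stride in its low 8 bits
 * and the number of per-CPU register copies in the upper bits, so
 * INTC_REG() can step to CPU c's copy and SMP_NR() falls back to a single
 * copy when no count was recorded.
 */
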
static unsigned int intc_prio_level[NR_IRQS];	/* for now */
static unsigned int default_prio_level = 2;	/* 2 - 16 */
static unsigned long ack_handle[NR_IRQS];
#ifdef CONFIG_INTC_BALANCING
static unsigned long dist_handle[NR_IRQS];
#endif

static inline struct intc_desc_int *get_intc_desc(unsigned int irq)
{
	struct irq_chip *chip = get_irq_chip(irq);
	return container_of(chip, struct intc_desc_int, chip);
}

static unsigned long intc_phys_to_virt(struct intc_desc_int *d,
				       unsigned long address)
{
	struct intc_window *window;
	int k;

	/* scan through physical windows and convert address */
	for (k = 0; k < d->nr_windows; k++) {
		window = d->window + k;

		if (address < window->phys)
			continue;

		if (address >= (window->phys + window->size))
			continue;

		address -= window->phys;
		address += (unsigned long)window->virt;

		return address;
	}

	/* no windows defined, register must be 1:1 mapped virt:phys */
	return address;
}

static unsigned int intc_get_reg(struct intc_desc_int *d, unsigned long address)
{
	unsigned int k;

	address = intc_phys_to_virt(d, address);

	for (k = 0; k < d->nr_reg; k++) {
		if (d->reg[k] == address)
			return k;
	}

	BUG();
	return 0;
}

static inline unsigned int set_field(unsigned int value,
				     unsigned int field_value,
				     unsigned int handle)
{
	unsigned int width = _INTC_WIDTH(handle);
	unsigned int shift = _INTC_SHIFT(handle);

	value &= ~(((1 << width) - 1) << shift);
	value |= field_value << shift;
	return value;
}

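/*
 * Worked example (added for illustration): for a handle with width 4 and
 * shift 8, set_field(0x0000f0f0, 0x5, handle) first clears bits 8..11 and
 * then ORs in 0x5 << 8, yielding 0x0000f5f0.
 */
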
static void write_8(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writeb(set_field(0, data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
}

static void write_16(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writew(set_field(0, data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
}

static void write_32(unsigned long addr, unsigned long h, unsigned long data)
{
	__raw_writel(set_field(0, data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
}

static void modify_8(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_writeb(set_field(__raw_readb(addr), data, h), addr);
	(void)__raw_readb(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

static void modify_16(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_writew(set_field(__raw_readw(addr), data, h), addr);
	(void)__raw_readw(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

static void modify_32(unsigned long addr, unsigned long h, unsigned long data)
{
	unsigned long flags;
	local_irq_save(flags);
	__raw_writel(set_field(__raw_readl(addr), data, h), addr);
	(void)__raw_readl(addr);	/* Defeat write posting */
	local_irq_restore(flags);
}

enum {	REG_FN_ERR = 0, REG_FN_WRITE_BASE = 1, REG_FN_MODIFY_BASE = 5 };

static void (*intc_reg_fns[])(unsigned long addr,
			      unsigned long h,
			      unsigned long data) = {
	[REG_FN_WRITE_BASE + 0] = write_8,
	[REG_FN_WRITE_BASE + 1] = write_16,
	[REG_FN_WRITE_BASE + 3] = write_32,
	[REG_FN_MODIFY_BASE + 0] = modify_8,
	[REG_FN_MODIFY_BASE + 1] = modify_16,
	[REG_FN_MODIFY_BASE + 3] = modify_32,
};

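/*
 * Editorial note: handles encode fn as a base plus (reg_width >> 3) - 1,
 * i.e. +0 for 8-bit, +1 for 16-bit and +3 for 32-bit registers, which is
 * why the [base + 2] slots in the table above are intentionally left
 * empty.
 */
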
enum {	MODE_ENABLE_REG = 0, /* Bit(s) set -> interrupt enabled */
	MODE_MASK_REG,       /* Bit(s) set -> interrupt disabled */
	MODE_DUAL_REG,       /* Two registers, set bit to enable / disable */
	MODE_PRIO_REG,       /* Priority value written to enable interrupt */
	MODE_PCLR_REG,       /* Above plus all bits set to disable interrupt */
};

static void intc_mode_field(unsigned long addr,
			    unsigned long handle,
			    void (*fn)(unsigned long,
				       unsigned long,
				       unsigned long),
			    unsigned int irq)
{
	fn(addr, handle, ((1 << _INTC_WIDTH(handle)) - 1));
}

static void intc_mode_zero(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
				      unsigned long,
				      unsigned long),
			   unsigned int irq)
{
	fn(addr, handle, 0);
}

static void intc_mode_prio(unsigned long addr,
			   unsigned long handle,
			   void (*fn)(unsigned long,
				      unsigned long,
				      unsigned long),
			   unsigned int irq)
{
	fn(addr, handle, intc_prio_level[irq]);
}

static void (*intc_enable_fns[])(unsigned long addr,
				 unsigned long handle,
				 void (*fn)(unsigned long,
					    unsigned long,
					    unsigned long),
				 unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_prio,
	[MODE_PCLR_REG] = intc_mode_prio,
};

static void (*intc_disable_fns[])(unsigned long addr,
				  unsigned long handle,
				  void (*fn)(unsigned long,
					     unsigned long,
					     unsigned long),
				  unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_zero,
	[MODE_MASK_REG] = intc_mode_field,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_zero,
	[MODE_PCLR_REG] = intc_mode_field,
};

#ifdef CONFIG_INTC_BALANCING
static inline void intc_balancing_enable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];
	unsigned long addr;

	if (irq_balancing_disabled(irq) || !handle)
		return;

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 1);
}

static inline void intc_balancing_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = dist_handle[irq];
	unsigned long addr;

	if (irq_balancing_disabled(irq) || !handle)
		return;

	addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
	intc_reg_fns[_INTC_FN(handle)](addr, handle, 0);
}

static unsigned int intc_dist_data(struct intc_desc *desc,
				   struct intc_desc_int *d,
				   intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_mask_regs; i++) {
		mr = desc->hw.mask_regs + i;

		/*
		 * Skip this entry if there's no auto-distribution
		 * register associated with it.
		 */
		if (!mr->dist_reg)
			continue;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->dist_reg;
			reg_d = mr->dist_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	/*
	 * It's possible we've gotten here with no distribution options
	 * available for the IRQ in question, so we just skip over those.
	 */
	return 0;
}
#else
static inline void intc_balancing_enable(unsigned int irq)
{
}

static inline void intc_balancing_disable(unsigned int irq)
{
}
#endif

static inline void _intc_enable(unsigned int irq, unsigned long handle)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long addr;
	unsigned int cpu;

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
		intc_enable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}

	intc_balancing_enable(irq);
}

static void intc_enable(unsigned int irq)
{
	_intc_enable(irq, (unsigned long)get_irq_chip_data(irq));
}

static void intc_disable(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = (unsigned long)get_irq_chip_data(irq);
	unsigned long addr;
	unsigned int cpu;

	intc_balancing_disable(irq);

	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
#ifdef CONFIG_SMP
		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
			continue;
#endif
		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
		intc_disable_fns[_INTC_MODE(handle)](addr, handle,
				intc_reg_fns[_INTC_FN(handle)], irq);
	}
}

static void (*intc_enable_noprio_fns[])(unsigned long addr,
					unsigned long handle,
					void (*fn)(unsigned long,
						   unsigned long,
						   unsigned long),
					unsigned int irq) = {
	[MODE_ENABLE_REG] = intc_mode_field,
	[MODE_MASK_REG] = intc_mode_zero,
	[MODE_DUAL_REG] = intc_mode_field,
	[MODE_PRIO_REG] = intc_mode_field,
	[MODE_PCLR_REG] = intc_mode_field,
};

static void intc_enable_disable(struct intc_desc_int *d,
				unsigned long handle, int do_enable)
{
	unsigned long addr;
	unsigned int cpu;
	void (*fn)(unsigned long, unsigned long,
		   void (*)(unsigned long, unsigned long, unsigned long),
		   unsigned int);

	if (do_enable) {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
			fn = intc_enable_noprio_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	} else {
		for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
			addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
			fn = intc_disable_fns[_INTC_MODE(handle)];
			fn(addr, handle, intc_reg_fns[_INTC_FN(handle)], 0);
		}
	}
}

static int intc_set_wake(unsigned int irq, unsigned int on)
{
	return 0; /* allow wakeup, but set up hardware in intc_suspend() */
}

#ifdef CONFIG_SMP
/*
 * This is called with the irq desc lock held, so we don't require any
 * additional locking here at the intc desc level. The affinity mask is
 * later tested in the enable/disable paths.
 */
static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask))
		return -1;

	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);

	return 0;
}
#endif

static void intc_mask_ack(unsigned int irq)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	unsigned long handle = ack_handle[irq];
	unsigned long addr;

	intc_disable(irq);

	/* read register and write zero only to the associated bit */
	if (handle) {
		addr = INTC_REG(d, _INTC_ADDR_D(handle), 0);
		switch (_INTC_FN(handle)) {
		case REG_FN_MODIFY_BASE + 0:	/* 8bit */
			__raw_readb(addr);
			__raw_writeb(0xff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 1:	/* 16bit */
			__raw_readw(addr);
			__raw_writew(0xffff ^ set_field(0, 1, handle), addr);
			break;
		case REG_FN_MODIFY_BASE + 3:	/* 32bit */
			__raw_readl(addr);
			__raw_writel(0xffffffff ^ set_field(0, 1, handle), addr);
			break;
		default:
			BUG();
			break;
		}
	}
}

73505b44
MD
514static struct intc_handle_int *intc_find_irq(struct intc_handle_int *hp,
515 unsigned int nr_hp,
516 unsigned int irq)
02ab3f70 517{
73505b44
MD
518 int i;
519
dc825b17
PM
520 /*
521 * this doesn't scale well, but...
3d37d94e
MD
522 *
523 * this function should only be used for cerain uncommon
524 * operations such as intc_set_priority() and intc_set_sense()
525 * and in those rare cases performance doesn't matter that much.
526 * keeping the memory footprint low is more important.
527 *
528 * one rather simple way to speed this up and still keep the
529 * memory footprint down is to make sure the array is sorted
530 * and then perform a bisect to lookup the irq.
531 */
73505b44
MD
532 for (i = 0; i < nr_hp; i++) {
533 if ((hp + i)->irq != irq)
534 continue;
535
536 return hp + i;
537 }
02ab3f70 538
73505b44 539 return NULL;
02ab3f70
MD
540}
541
int intc_set_priority(unsigned int irq, unsigned int prio)
{
	struct intc_desc_int *d = get_intc_desc(irq);
	struct intc_handle_int *ihp;

	if (!intc_prio_level[irq] || prio <= 1)
		return -EINVAL;

	ihp = intc_find_irq(d->prio, d->nr_prio, irq);
	if (ihp) {
		if (prio >= (1 << _INTC_WIDTH(ihp->handle)))
			return -EINVAL;

		intc_prio_level[irq] = prio;

		/*
		 * only set secondary masking method directly
		 * primary masking method is using intc_prio_level[irq]
		 * priority level will be set during next enable()
		 */
		if (_INTC_FN(ihp->handle) != REG_FN_ERR)
			_intc_enable(irq, ihp->handle);
	}
	return 0;
}

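/*
 * Usage sketch (illustrative; the vector number is hypothetical):
 *
 *	int ret = intc_set_priority(evt2irq(0x600), 7);
 *
 * -EINVAL comes back if the IRQ carries no priority level at all, or if
 * the requested value does not fit in the field width recorded in the
 * prio handle; otherwise the new level takes effect either immediately
 * or on the next enable, depending on the masking method.
 */
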
#define VALID(x) (x | 0x80)

static unsigned char intc_irq_sense_table[IRQ_TYPE_SENSE_MASK + 1] = {
	[IRQ_TYPE_EDGE_FALLING] = VALID(0),
	[IRQ_TYPE_EDGE_RISING] = VALID(1),
	[IRQ_TYPE_LEVEL_LOW] = VALID(2),
	/* SH7706, SH7707 and SH7709 do not support high level triggering */
#if !defined(CONFIG_CPU_SUBTYPE_SH7706) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7707) && \
    !defined(CONFIG_CPU_SUBTYPE_SH7709)
	[IRQ_TYPE_LEVEL_HIGH] = VALID(3),
#endif
};

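/*
 * Editorial note: VALID() tags supported types with bit 7 so that table
 * slots left at zero (types the CPU cannot generate) remain
 * distinguishable from a legitimate sense value of 0 and can be rejected
 * by intc_set_sense() below.
 */
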
582static int intc_set_sense(unsigned int irq, unsigned int type)
583{
73505b44 584 struct intc_desc_int *d = get_intc_desc(irq);
02ab3f70 585 unsigned char value = intc_irq_sense_table[type & IRQ_TYPE_SENSE_MASK];
73505b44
MD
586 struct intc_handle_int *ihp;
587 unsigned long addr;
02ab3f70 588
73505b44 589 if (!value)
02ab3f70
MD
590 return -EINVAL;
591
73505b44
MD
592 ihp = intc_find_irq(d->sense, d->nr_sense, irq);
593 if (ihp) {
f18d533e 594 addr = INTC_REG(d, _INTC_ADDR_E(ihp->handle), 0);
73505b44 595 intc_reg_fns[_INTC_FN(ihp->handle)](addr, ihp->handle, value);
02ab3f70 596 }
73505b44 597 return 0;
02ab3f70
MD
598}
599
static intc_enum __init intc_grp_id(struct intc_desc *desc,
				    intc_enum enum_id)
{
	struct intc_group *g = desc->hw.groups;
	unsigned int i, j;

	for (i = 0; g && enum_id && i < desc->hw.nr_groups; i++) {
		g = desc->hw.groups + i;

		for (j = 0; g->enum_ids[j]; j++) {
			if (g->enum_ids[j] != enum_id)
				continue;

			return g->enum_id;
		}
	}

	return 0;
}

static unsigned int __init _intc_mask_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_mask_reg *mr = desc->hw.mask_regs;
	unsigned int fn, mode;
	unsigned long reg_e, reg_d;

	while (mr && enum_id && *reg_idx < desc->hw.nr_mask_regs) {
		mr = desc->hw.mask_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(mr->enum_ids); (*fld_idx)++) {
			if (mr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (mr->set_reg && mr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_DUAL_REG;
				reg_e = mr->clr_reg;
				reg_d = mr->set_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				if (mr->set_reg) {
					mode = MODE_ENABLE_REG;
					reg_e = mr->set_reg;
					reg_d = mr->set_reg;
				} else {
					mode = MODE_MASK_REG;
					reg_e = mr->clr_reg;
					reg_d = mr->clr_reg;
				}
			}

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - *fld_idx);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

static unsigned int __init intc_mask_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_mask_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_mask_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}

static unsigned int __init _intc_prio_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id,
					   unsigned int *reg_idx,
					   unsigned int *fld_idx)
{
	struct intc_prio_reg *pr = desc->hw.prio_regs;
	unsigned int fn, n, mode, bit;
	unsigned long reg_e, reg_d;

	while (pr && enum_id && *reg_idx < desc->hw.nr_prio_regs) {
		pr = desc->hw.prio_regs + *reg_idx;

		for (; *fld_idx < ARRAY_SIZE(pr->enum_ids); (*fld_idx)++) {
			if (pr->enum_ids[*fld_idx] != enum_id)
				continue;

			if (pr->set_reg && pr->clr_reg) {
				fn = REG_FN_WRITE_BASE;
				mode = MODE_PCLR_REG;
				reg_e = pr->set_reg;
				reg_d = pr->clr_reg;
			} else {
				fn = REG_FN_MODIFY_BASE;
				mode = MODE_PRIO_REG;
				BUG_ON(!pr->set_reg);
				reg_e = pr->set_reg;
				reg_d = pr->set_reg;
			}

			fn += (pr->reg_width >> 3) - 1;
			n = *fld_idx + 1;

			BUG_ON(n * pr->field_width > pr->reg_width);

			bit = pr->reg_width - (n * pr->field_width);

			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					pr->field_width, bit);
		}

		*fld_idx = 0;
		(*reg_idx)++;
	}

	return 0;
}

static unsigned int __init intc_prio_data(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  intc_enum enum_id, int do_grps)
{
	unsigned int i = 0;
	unsigned int j = 0;
	unsigned int ret;

	ret = _intc_prio_data(desc, d, enum_id, &i, &j);
	if (ret)
		return ret;

	if (do_grps)
		return intc_prio_data(desc, d, intc_grp_id(desc, enum_id), 0);

	return 0;
}

static void __init intc_enable_disable_enum(struct intc_desc *desc,
					    struct intc_desc_int *d,
					    intc_enum enum_id, int enable)
{
	unsigned int i, j, data;

	/* go through and enable/disable all mask bits */
	i = j = 0;
	do {
		data = _intc_mask_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);

	/* go through and enable/disable all priority fields */
	i = j = 0;
	do {
		data = _intc_prio_data(desc, d, enum_id, &i, &j);
		if (data)
			intc_enable_disable(d, data, enable);
		j++;
	} while (data);
}

static unsigned int __init intc_ack_data(struct intc_desc *desc,
					 struct intc_desc_int *d,
					 intc_enum enum_id)
{
	struct intc_mask_reg *mr = desc->hw.ack_regs;
	unsigned int i, j, fn, mode;
	unsigned long reg_e, reg_d;

	for (i = 0; mr && enum_id && i < desc->hw.nr_ack_regs; i++) {
		mr = desc->hw.ack_regs + i;

		for (j = 0; j < ARRAY_SIZE(mr->enum_ids); j++) {
			if (mr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			mode = MODE_ENABLE_REG;
			reg_e = mr->set_reg;
			reg_d = mr->set_reg;

			fn += (mr->reg_width >> 3) - 1;
			return _INTC_MK(fn, mode,
					intc_get_reg(d, reg_e),
					intc_get_reg(d, reg_d),
					1,
					(mr->reg_width - 1) - j);
		}
	}

	return 0;
}

static unsigned int __init intc_sense_data(struct intc_desc *desc,
					   struct intc_desc_int *d,
					   intc_enum enum_id)
{
	struct intc_sense_reg *sr = desc->hw.sense_regs;
	unsigned int i, j, fn, bit;

	for (i = 0; sr && enum_id && i < desc->hw.nr_sense_regs; i++) {
		sr = desc->hw.sense_regs + i;

		for (j = 0; j < ARRAY_SIZE(sr->enum_ids); j++) {
			if (sr->enum_ids[j] != enum_id)
				continue;

			fn = REG_FN_MODIFY_BASE;
			fn += (sr->reg_width >> 3) - 1;

			BUG_ON((j + 1) * sr->field_width > sr->reg_width);

			bit = sr->reg_width - ((j + 1) * sr->field_width);

			return _INTC_MK(fn, 0, intc_get_reg(d, sr->reg),
					0, sr->field_width, bit);
		}
	}

	return 0;
}

unsigned int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
	struct intc_map_entry *ptr;
	struct intc_desc_int *d;
	unsigned int irq = 0;

	list_for_each_entry(d, &intc_list, list) {
		if (strcmp(d->chip.name, chipname) == 0) {
			ptr = radix_tree_lookup(&d->tree, enum_id);
			if (ptr) {
				irq = ptr - intc_irq_xlate;
				break;
			}
		}
	}

	return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);

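/*
 * Usage sketch (illustrative; the chip name and enum are hypothetical):
 *
 *	unsigned int irq = intc_irq_lookup("sh7786-intc", DMAC0_DMINT0);
 *	if (irq)
 *		...
 *
 * A return of 0 means the <chipname, enum_id> pair is not bound to any
 * vector on that controller.
 */
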
static void __init intc_register_irq(struct intc_desc *desc,
				     struct intc_desc_int *d,
				     intc_enum enum_id,
				     unsigned int irq)
{
	struct intc_handle_int *hp;
	unsigned int data[2], primary;

	/*
	 * Register the IRQ position with the global IRQ map, then insert
	 * it into the radix tree.
	 */
	set_bit(irq, intc_irq_map);

	mutex_lock(&irq_xlate_mutex);
	radix_tree_insert(&d->tree, enum_id, &intc_irq_xlate[irq]);
	mutex_unlock(&irq_xlate_mutex);

	/*
	 * Prefer single interrupt source bitmap over other combinations:
	 *
	 * 1. bitmap, single interrupt source
	 * 2. priority, single interrupt source
	 * 3. bitmap, multiple interrupt sources (groups)
	 * 4. priority, multiple interrupt sources (groups)
	 */
	data[0] = intc_mask_data(desc, d, enum_id, 0);
	data[1] = intc_prio_data(desc, d, enum_id, 0);

	primary = 0;
	if (!data[0] && data[1])
		primary = 1;

	if (!data[0] && !data[1])
		pr_warning("missing unique irq mask for irq %d (vect 0x%04x)\n",
			   irq, irq2evt(irq));

	data[0] = data[0] ? data[0] : intc_mask_data(desc, d, enum_id, 1);
	data[1] = data[1] ? data[1] : intc_prio_data(desc, d, enum_id, 1);

	if (!data[primary])
		primary ^= 1;

	BUG_ON(!data[primary]); /* must have primary masking method */

	disable_irq_nosync(irq);
	set_irq_chip_and_handler_name(irq, &d->chip,
				      handle_level_irq, "level");
	set_irq_chip_data(irq, (void *)data[primary]);

	/*
	 * set priority level
	 * - this needs to be at least 2 for 5-bit priorities on 7780
	 */
	intc_prio_level[irq] = default_prio_level;

	/* enable secondary masking method if present */
	if (data[!primary])
		_intc_enable(irq, data[!primary]);

	/* add irq to d->prio list if priority is available */
	if (data[1]) {
		hp = d->prio + d->nr_prio;
		hp->irq = irq;
		hp->handle = data[1];

		if (primary) {
			/*
			 * only secondary priority should access registers, so
			 * set _INTC_FN(h) = REG_FN_ERR for intc_set_priority()
			 */
			hp->handle &= ~_INTC_MK(0x0f, 0, 0, 0, 0, 0);
			hp->handle |= _INTC_MK(REG_FN_ERR, 0, 0, 0, 0, 0);
		}
		d->nr_prio++;
	}

	/* add irq to d->sense list if sense is available */
	data[0] = intc_sense_data(desc, d, enum_id);
	if (data[0]) {
		(d->sense + d->nr_sense)->irq = irq;
		(d->sense + d->nr_sense)->handle = data[0];
		d->nr_sense++;
	}

	/* irq should be disabled by default */
	d->chip.mask(irq);

	if (desc->hw.ack_regs)
		ack_handle[irq] = intc_ack_data(desc, d, enum_id);

#ifdef CONFIG_INTC_BALANCING
	if (desc->hw.mask_regs)
		dist_handle[irq] = intc_dist_data(desc, d, enum_id);
#endif

#ifdef CONFIG_ARM
	set_irq_flags(irq, IRQF_VALID);	/* Enable IRQ on ARM systems */
#endif
}

static unsigned int __init save_reg(struct intc_desc_int *d,
				    unsigned int cnt,
				    unsigned long value,
				    unsigned int smp)
{
	if (value) {
		value = intc_phys_to_virt(d, value);

		d->reg[cnt] = value;
#ifdef CONFIG_SMP
		d->smp[cnt] = smp;
#endif
		return 1;
	}

	return 0;
}

static void intc_redirect_irq(unsigned int irq, struct irq_desc *desc)
{
	generic_handle_irq((unsigned int)get_irq_data(irq));
}

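/*
 * Editorial note: vectors that share an enum_id are wired up to
 * intc_redirect_irq() by register_intc_controller() below, with the
 * primary IRQ number stashed via set_irq_data(); the handler simply
 * re-dispatches to that primary IRQ.
 */
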
int __init register_intc_controller(struct intc_desc *desc)
{
	unsigned int i, k, smp;
	struct intc_hw_desc *hw = &desc->hw;
	struct intc_desc_int *d;
	struct resource *res;

	pr_info("Registered controller '%s' with %u IRQs\n",
		desc->name, hw->nr_vectors);

	d = kzalloc(sizeof(*d), GFP_NOWAIT);
	if (!d)
		goto err0;

	INIT_LIST_HEAD(&d->list);
	list_add(&d->list, &intc_list);

	if (desc->num_resources) {
		d->nr_windows = desc->num_resources;
		d->window = kzalloc(d->nr_windows * sizeof(*d->window),
				    GFP_NOWAIT);
		if (!d->window)
			goto err1;

		for (k = 0; k < d->nr_windows; k++) {
			res = desc->resource + k;
			WARN_ON(resource_type(res) != IORESOURCE_MEM);
			d->window[k].phys = res->start;
			d->window[k].size = resource_size(res);
			d->window[k].virt = ioremap_nocache(res->start,
							    resource_size(res));
			if (!d->window[k].virt)
				goto err2;
		}
	}

	d->nr_reg = hw->mask_regs ? hw->nr_mask_regs * 2 : 0;
#ifdef CONFIG_INTC_BALANCING
	if (d->nr_reg)
		d->nr_reg += hw->nr_mask_regs;
#endif
	d->nr_reg += hw->prio_regs ? hw->nr_prio_regs * 2 : 0;
	d->nr_reg += hw->sense_regs ? hw->nr_sense_regs : 0;
	d->nr_reg += hw->ack_regs ? hw->nr_ack_regs : 0;

	d->reg = kzalloc(d->nr_reg * sizeof(*d->reg), GFP_NOWAIT);
	if (!d->reg)
		goto err2;

#ifdef CONFIG_SMP
	d->smp = kzalloc(d->nr_reg * sizeof(*d->smp), GFP_NOWAIT);
	if (!d->smp)
		goto err3;
#endif
	k = 0;

	if (hw->mask_regs) {
		for (i = 0; i < hw->nr_mask_regs; i++) {
			smp = IS_SMP(hw->mask_regs[i]);
			k += save_reg(d, k, hw->mask_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->mask_regs[i].clr_reg, smp);
#ifdef CONFIG_INTC_BALANCING
			k += save_reg(d, k, hw->mask_regs[i].dist_reg, 0);
#endif
		}
	}

	if (hw->prio_regs) {
		d->prio = kzalloc(hw->nr_vectors * sizeof(*d->prio),
				  GFP_NOWAIT);
		if (!d->prio)
			goto err4;

		for (i = 0; i < hw->nr_prio_regs; i++) {
			smp = IS_SMP(hw->prio_regs[i]);
			k += save_reg(d, k, hw->prio_regs[i].set_reg, smp);
			k += save_reg(d, k, hw->prio_regs[i].clr_reg, smp);
		}
	}

	if (hw->sense_regs) {
		d->sense = kzalloc(hw->nr_vectors * sizeof(*d->sense),
				   GFP_NOWAIT);
		if (!d->sense)
			goto err5;

		for (i = 0; i < hw->nr_sense_regs; i++)
			k += save_reg(d, k, hw->sense_regs[i].reg, 0);
	}

	d->chip.name = desc->name;
	d->chip.mask = intc_disable;
	d->chip.unmask = intc_enable;
	d->chip.mask_ack = intc_disable;
	d->chip.enable = intc_enable;
	d->chip.disable = intc_disable;
	d->chip.shutdown = intc_disable;
	d->chip.set_type = intc_set_sense;
	d->chip.set_wake = intc_set_wake;
#ifdef CONFIG_SMP
	d->chip.set_affinity = intc_set_affinity;
#endif

	if (hw->ack_regs) {
		for (i = 0; i < hw->nr_ack_regs; i++)
			k += save_reg(d, k, hw->ack_regs[i].set_reg, 0);

		d->chip.mask_ack = intc_mask_ack;
	}

	/* disable bits matching force_disable before registering irqs */
	if (desc->force_disable)
		intc_enable_disable_enum(desc, d, desc->force_disable, 0);

	/* disable bits matching force_enable before registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 0);

	BUG_ON(k > 256); /* _INTC_ADDR_E() and _INTC_ADDR_D() are 8 bits */

	/* register the vectors one by one */
	for (i = 0; i < hw->nr_vectors; i++) {
		struct intc_vect *vect = hw->vectors + i;
		unsigned int irq = evt2irq(vect->vect);
		struct irq_desc *irq_desc;

		if (!vect->enum_id)
			continue;

		irq_desc = irq_to_desc_alloc_node(irq, numa_node_id());
		if (unlikely(!irq_desc)) {
			pr_err("can't get irq_desc for %d\n", irq);
			continue;
		}

		intc_irq_xlate[irq].enum_id = vect->enum_id;
		intc_irq_xlate[irq].desc = d;

		intc_register_irq(desc, d, vect->enum_id, irq);

		for (k = i + 1; k < hw->nr_vectors; k++) {
			struct intc_vect *vect2 = hw->vectors + k;
			unsigned int irq2 = evt2irq(vect2->vect);

			if (vect->enum_id != vect2->enum_id)
				continue;

			/*
			 * In the case of multi-evt handling and sparse
			 * IRQ support, each vector still needs to have
			 * its own backing irq_desc.
			 */
			irq_desc = irq_to_desc_alloc_node(irq2, numa_node_id());
			if (unlikely(!irq_desc)) {
				pr_err("can't get irq_desc for %d\n", irq2);
				continue;
			}

			vect2->enum_id = 0;

			/* redirect this interrupt to the first one */
			set_irq_chip(irq2, &dummy_irq_chip);
			set_irq_chained_handler(irq2, intc_redirect_irq);
			set_irq_data(irq2, (void *)irq);
		}
	}

	/* enable bits matching force_enable after registering irqs */
	if (desc->force_enable)
		intc_enable_disable_enum(desc, d, desc->force_enable, 1);

	return 0;
err5:
	kfree(d->prio);
err4:
#ifdef CONFIG_SMP
	kfree(d->smp);
err3:
#endif
	kfree(d->reg);
err2:
	for (k = 0; k < d->nr_windows; k++)
		if (d->window[k].virt)
			iounmap(d->window[k].virt);

	kfree(d->window);
err1:
	kfree(d);
err0:
	pr_err("unable to allocate INTC memory\n");

	return -ENOMEM;
}

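/*
 * Registration sketch (illustrative; vector numbers, enum IDs and the
 * register tables are made up): CPU support code typically builds the
 * descriptor with the DECLARE_INTC_DESC() helper from <linux/sh_intc.h>
 * and registers it once, early in boot:
 *
 *	static struct intc_vect vectors[] __initdata = {
 *		INTC_VECT(IRQ0, 0x600), INTC_VECT(IRQ1, 0x620),
 *	};
 *
 *	static DECLARE_INTC_DESC(intc_desc, "sh-example", vectors, NULL,
 *				 mask_registers, prio_registers, NULL);
 *	...
 *	register_intc_controller(&intc_desc);
 */
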
#ifdef CONFIG_INTC_USERIMASK
static void __iomem *uimask;

int register_intc_userimask(unsigned long addr)
{
	if (unlikely(uimask))
		return -EBUSY;

	uimask = ioremap_nocache(addr, SZ_4K);
	if (unlikely(!uimask))
		return -ENOMEM;

	pr_info("userimask support registered for levels 0 -> %d\n",
		default_prio_level - 1);

	return 0;
}

static ssize_t
show_intc_userimask(struct sysdev_class *cls,
		    struct sysdev_class_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", (__raw_readl(uimask) >> 4) & 0xf);
}

static ssize_t
store_intc_userimask(struct sysdev_class *cls,
		     struct sysdev_class_attribute *attr,
		     const char *buf, size_t count)
{
	unsigned long level;

	level = simple_strtoul(buf, NULL, 10);

	/*
	 * Minimal acceptable IRQ levels are in the 2 - 16 range, but
	 * these are chomped so as to not interfere with normal IRQs.
	 *
	 * Level 1 is a special case on some CPUs in that it's not
	 * directly settable, but given that USERIMASK cuts off below a
	 * certain level, we don't care about this limitation here.
	 * Level 0 on the other hand equates to user masking disabled.
	 *
	 * We use default_prio_level as a cut off so that only special
	 * case opt-in IRQs can be mangled.
	 */
	if (level >= default_prio_level)
		return -EINVAL;

	__raw_writel(0xa5 << 24 | level << 4, uimask);

	return count;
}

static SYSDEV_CLASS_ATTR(userimask, S_IRUSR | S_IWUSR,
			 show_intc_userimask, store_intc_userimask);
#endif

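/*
 * Editorial note, assuming the usual sysdev class layout: the attribute
 * above is registered against the "intc" sysdev class and so should
 * surface as /sys/devices/system/intc/userimask, where writing a small
 * level value cuts off low-priority IRQs from userspace and writing 0
 * disables user masking again.
 */
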
#ifdef CONFIG_INTC_MAPPING_DEBUG
static int intc_irq_xlate_debug(struct seq_file *m, void *priv)
{
	int i;

	seq_printf(m, "%-5s %-7s %-15s\n", "irq", "enum", "chip name");

	for (i = 1; i < nr_irqs; i++) {
		struct intc_desc_int *desc = intc_irq_xlate[i].desc;

		if (!desc)
			continue;

		seq_printf(m, "%5d ", i);
		seq_printf(m, "0x%05x ", intc_irq_xlate[i].enum_id);
		seq_printf(m, "%-15s\n", desc->chip.name);
	}

	return 0;
}

static int intc_irq_xlate_open(struct inode *inode, struct file *file)
{
	return single_open(file, intc_irq_xlate_debug, inode->i_private);
}

static const struct file_operations intc_irq_xlate_fops = {
	.open = intc_irq_xlate_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static int __init intc_irq_xlate_init(void)
{
	/*
	 * XXX.. use arch_debugfs_dir here when all of the intc users are
	 * converted.
	 */
	if (debugfs_create_file("intc_irq_xlate", S_IRUGO, NULL, NULL,
				&intc_irq_xlate_fops) == NULL)
		return -ENOMEM;

	return 0;
}
fs_initcall(intc_irq_xlate_init);
#endif

static ssize_t
show_intc_name(struct sys_device *dev, struct sysdev_attribute *attr, char *buf)
{
	struct intc_desc_int *d;

	d = container_of(dev, struct intc_desc_int, sysdev);

	return sprintf(buf, "%s\n", d->chip.name);
}

static SYSDEV_ATTR(name, S_IRUGO, show_intc_name, NULL);

static int intc_suspend(struct sys_device *dev, pm_message_t state)
{
	struct intc_desc_int *d;
	struct irq_desc *desc;
	int irq;

	/* get intc controller associated with this sysdev */
	d = container_of(dev, struct intc_desc_int, sysdev);

	switch (state.event) {
	case PM_EVENT_ON:
		if (d->state.event != PM_EVENT_FREEZE)
			break;
		for_each_irq_desc(irq, desc) {
			if (desc->handle_irq == intc_redirect_irq)
				continue;
			if (desc->chip != &d->chip)
				continue;
			if (desc->status & IRQ_DISABLED)
				intc_disable(irq);
			else
				intc_enable(irq);
		}
		break;
	case PM_EVENT_FREEZE:
		/* nothing has to be done */
		break;
	case PM_EVENT_SUSPEND:
		/* enable wakeup irqs belonging to this intc controller */
		for_each_irq_desc(irq, desc) {
			if ((desc->status & IRQ_WAKEUP) && (desc->chip == &d->chip))
				intc_enable(irq);
		}
		break;
	}
	d->state = state;

	return 0;
}

static int intc_resume(struct sys_device *dev)
{
	return intc_suspend(dev, PMSG_ON);
}

static struct sysdev_class intc_sysdev_class = {
	.name = "intc",
	.suspend = intc_suspend,
	.resume = intc_resume,
};

/* register this intc as sysdev to allow suspend/resume */
static int __init register_intc_sysdevs(void)
{
	struct intc_desc_int *d;
	int error;
	int id = 0;

	error = sysdev_class_register(&intc_sysdev_class);
#ifdef CONFIG_INTC_USERIMASK
	if (!error && uimask)
		error = sysdev_class_create_file(&intc_sysdev_class,
						 &attr_userimask);
#endif
	if (!error) {
		list_for_each_entry(d, &intc_list, list) {
			d->sysdev.id = id;
			d->sysdev.cls = &intc_sysdev_class;
			error = sysdev_register(&d->sysdev);
			if (error == 0)
				error = sysdev_create_file(&d->sysdev,
							   &attr_name);
			if (error)
				break;

			id++;
		}
	}

	if (error)
		pr_err("sysdev registration error\n");

	return error;
}
device_initcall(register_intc_sysdevs);

/*
 * Dynamic IRQ allocation and deallocation
 */
unsigned int create_irq_nr(unsigned int irq_want, int node)
{
	unsigned int irq = 0, new;
	unsigned long flags;
	struct irq_desc *desc;

	spin_lock_irqsave(&vector_lock, flags);

	/*
	 * First try the wanted IRQ
	 */
	if (test_and_set_bit(irq_want, intc_irq_map) == 0) {
		new = irq_want;
	} else {
		/* .. then fall back to scanning. */
		new = find_first_zero_bit(intc_irq_map, nr_irqs);
		if (unlikely(new == nr_irqs))
			goto out_unlock;

		__set_bit(new, intc_irq_map);
	}

	desc = irq_to_desc_alloc_node(new, node);
	if (unlikely(!desc)) {
		pr_err("can't get irq_desc for %d\n", new);
		goto out_unlock;
	}

	desc = move_irq_desc(desc, node);
	irq = new;

out_unlock:
	spin_unlock_irqrestore(&vector_lock, flags);

	if (irq > 0) {
		dynamic_irq_init(irq);
#ifdef CONFIG_ARM
		set_irq_flags(irq, IRQF_VALID);	/* Enable IRQ on ARM systems */
#endif
	}

	return irq;
}

int create_irq(void)
{
	int nid = cpu_to_node(smp_processor_id());
	int irq;

	irq = create_irq_nr(NR_IRQS_LEGACY, nid);
	if (irq == 0)
		irq = -1;

	return irq;
}

void destroy_irq(unsigned int irq)
{
	unsigned long flags;

	dynamic_irq_cleanup(irq);

	spin_lock_irqsave(&vector_lock, flags);
	__clear_bit(irq, intc_irq_map);
	spin_unlock_irqrestore(&vector_lock, flags);
}
45b9deaf
PM
1455
1456int reserve_irq_vector(unsigned int irq)
1457{
1458 unsigned long flags;
1459 int ret = 0;
1460
1461 spin_lock_irqsave(&vector_lock, flags);
1462 if (test_and_set_bit(irq, intc_irq_map))
1463 ret = -EBUSY;
1464 spin_unlock_irqrestore(&vector_lock, flags);
1465
1466 return ret;
1467}
1468
4bacd796
PM
1469void reserve_intc_vectors(struct intc_vect *vectors, unsigned int nr_vecs)
1470{
1471 unsigned long flags;
1472 int i;
1473
1474 spin_lock_irqsave(&vector_lock, flags);
1475 for (i = 0; i < nr_vecs; i++)
1476 __set_bit(evt2irq(vectors[i].vect), intc_irq_map);
1477 spin_unlock_irqrestore(&vector_lock, flags);
1478}
1479
45b9deaf
PM
1480void reserve_irq_legacy(void)
1481{
1482 unsigned long flags;
1483 int i, j;
1484
1485 spin_lock_irqsave(&vector_lock, flags);
1486 j = find_first_bit(intc_irq_map, nr_irqs);
1487 for (i = 0; i < j; i++)
1488 __set_bit(i, intc_irq_map);
1489 spin_unlock_irqrestore(&vector_lock, flags);
1490}