bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
Merge branch 'irq-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Sat, 20 Jun 2009 18:30:01 +0000 (11:30 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sat, 20 Jun 2009 18:30:01 +0000 (11:30 -0700)
* 'irq-fixes-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  genirq, irq.h: Fix kernel-doc warnings
  genirq: fix comment to say IRQ_WAKE_THREAD

1  2 
include/linux/irq.h
kernel/irq/manage.c

diff --combined include/linux/irq.h
index 1e50c34f0062854ef167130ad349f1385adcb4d2,0c001c15752ae92c6a6f324902eea8351d252b49..cb2e77a3f7f75a770a38b955d561aed8e77d3907
@@@ -117,7 -117,7 +117,7 @@@ struct irq_chip 
        void            (*eoi)(unsigned int irq);
  
        void            (*end)(unsigned int irq);
 -      void            (*set_affinity)(unsigned int irq,
 +      int             (*set_affinity)(unsigned int irq,
                                        const struct cpumask *dest);
        int             (*retrigger)(unsigned int irq);
        int             (*set_type)(unsigned int irq, unsigned int flow_type);
@@@ -157,7 -157,7 +157,7 @@@ struct irq_2_iommu
   * @irqs_unhandled:   stats field for spurious unhandled interrupts
   * @lock:             locking for SMP
   * @affinity:         IRQ affinity on SMP
-  * @cpu:              cpu index useful for balancing
+  * @node:             node index useful for balancing
   * @pending_mask:     pending rebalanced interrupts
   * @threads_active:   number of irqaction threads currently running
   * @wait_for_threads: wait queue for sync_irq to wait for threaded handlers
@@@ -187,7 -187,7 +187,7 @@@ struct irq_desc 
        spinlock_t              lock;
  #ifdef CONFIG_SMP
        cpumask_var_t           affinity;
 -      unsigned int            cpu;
 +      unsigned int            node;
  #ifdef CONFIG_GENERIC_PENDING_IRQ
        cpumask_var_t           pending_mask;
  #endif
  } ____cacheline_internodealigned_in_smp;
  
  extern void arch_init_copy_chip_data(struct irq_desc *old_desc,
 -                                      struct irq_desc *desc, int cpu);
 +                                      struct irq_desc *desc, int node);
  extern void arch_free_chip_data(struct irq_desc *old_desc, struct irq_desc *desc);
  
  #ifndef CONFIG_SPARSE_IRQ
  extern struct irq_desc irq_desc[NR_IRQS];
 -#else /* CONFIG_SPARSE_IRQ */
 -extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int cpu);
 -#endif /* CONFIG_SPARSE_IRQ */
 -
 -extern struct irq_desc *irq_to_desc_alloc_cpu(unsigned int irq, int cpu);
 +#endif
  
 -static inline struct irq_desc *
 -irq_remap_to_desc(unsigned int irq, struct irq_desc *desc)
 -{
 -#ifdef CONFIG_NUMA_MIGRATE_IRQ_DESC
 -      return irq_to_desc(irq);
 +#ifdef CONFIG_NUMA_IRQ_DESC
 +extern struct irq_desc *move_irq_desc(struct irq_desc *old_desc, int node);
  #else
 +static inline struct irq_desc *move_irq_desc(struct irq_desc *desc, int node)
 +{
        return desc;
 -#endif
  }
 +#endif
 +
 +extern struct irq_desc *irq_to_desc_alloc_node(unsigned int irq, int node);
  
  /*
   * Migration helpers for obsolete names, they will go away:
@@@ -383,7 -386,7 +383,7 @@@ extern void set_irq_noprobe(unsigned in
  extern void set_irq_probe(unsigned int irq);
  
  /* Handle dynamic irq creation and destruction */
 -extern unsigned int create_irq_nr(unsigned int irq_want);
 +extern unsigned int create_irq_nr(unsigned int irq_want, int node);
  extern int create_irq(void);
  extern void destroy_irq(unsigned int irq);
  
@@@ -421,44 -424,47 +421,44 @@@ extern int set_irq_msi(unsigned int irq
  
  #ifdef CONFIG_SMP
  /**
 - * init_alloc_desc_masks - allocate cpumasks for irq_desc
 + * alloc_desc_masks - allocate cpumasks for irq_desc
   * @desc:     pointer to irq_desc struct
-  * @cpu:      cpu which will be handling the cpumasks
+  * @node:     node which will be handling the cpumasks
   * @boot:     true if need bootmem
   *
   * Allocates affinity and pending_mask cpumask if required.
   * Returns true if successful (or not required).
 - * Side effect: affinity has all bits set, pending_mask has all bits clear.
   */
 -static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
 -                                                              bool boot)
 +static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
 +                                                      bool boot)
  {
 -      int node;
 -
 -      if (boot) {
 -              alloc_bootmem_cpumask_var(&desc->affinity);
 -              cpumask_setall(desc->affinity);
 -
 -#ifdef CONFIG_GENERIC_PENDING_IRQ
 -              alloc_bootmem_cpumask_var(&desc->pending_mask);
 -              cpumask_clear(desc->pending_mask);
 -#endif
 -              return true;
 -      }
 +      gfp_t gfp = GFP_ATOMIC;
  
 -      node = cpu_to_node(cpu);
 +      if (boot)
 +              gfp = GFP_NOWAIT;
  
 -      if (!alloc_cpumask_var_node(&desc->affinity, GFP_ATOMIC, node))
 +#ifdef CONFIG_CPUMASK_OFFSTACK
 +      if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
                return false;
 -      cpumask_setall(desc->affinity);
  
  #ifdef CONFIG_GENERIC_PENDING_IRQ
 -      if (!alloc_cpumask_var_node(&desc->pending_mask, GFP_ATOMIC, node)) {
 +      if (!alloc_cpumask_var_node(&desc->pending_mask, gfp, node)) {
                free_cpumask_var(desc->affinity);
                return false;
        }
 -      cpumask_clear(desc->pending_mask);
 +#endif
  #endif
        return true;
  }
  
 +static inline void init_desc_masks(struct irq_desc *desc)
 +{
 +      cpumask_setall(desc->affinity);
 +#ifdef CONFIG_GENERIC_PENDING_IRQ
 +      cpumask_clear(desc->pending_mask);
 +#endif
 +}
 +
  /**
   * init_copy_desc_masks - copy cpumasks for irq_desc
   * @old_desc: pointer to old irq_desc struct
  static inline void init_copy_desc_masks(struct irq_desc *old_desc,
                                        struct irq_desc *new_desc)
  {
 -#ifdef CONFIG_CPUMASKS_OFFSTACK
 +#ifdef CONFIG_CPUMASK_OFFSTACK
        cpumask_copy(new_desc->affinity, old_desc->affinity);
  
  #ifdef CONFIG_GENERIC_PENDING_IRQ
@@@ -493,16 -499,12 +493,16 @@@ static inline void free_desc_masks(stru
  
  #else /* !CONFIG_SMP */
  
 -static inline bool init_alloc_desc_masks(struct irq_desc *desc, int cpu,
 +static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
                                                                bool boot)
  {
        return true;
  }
  
 +static inline void init_desc_masks(struct irq_desc *desc)
 +{
 +}
 +
  static inline void init_copy_desc_masks(struct irq_desc *old_desc,
                                        struct irq_desc *new_desc)
  {
diff --combined kernel/irq/manage.c
index aaf5c9d05770378b42acc2d41905ceb543f1f280,eb47f8b8055781cdb2576a199358cbeb5c8b3f77..50da676729013dfe8dbc7929a4fdec16412f7780
@@@ -80,7 -80,7 +80,7 @@@ int irq_can_set_affinity(unsigned int i
        return 1;
  }
  
 -static void
 +void
  irq_set_thread_affinity(struct irq_desc *desc, const struct cpumask *cpumask)
  {
        struct irqaction *action = desc->action;
@@@ -109,22 -109,17 +109,22 @@@ int irq_set_affinity(unsigned int irq, 
        spin_lock_irqsave(&desc->lock, flags);
  
  #ifdef CONFIG_GENERIC_PENDING_IRQ
 -      if (desc->status & IRQ_MOVE_PCNTXT)
 -              desc->chip->set_affinity(irq, cpumask);
 +      if (desc->status & IRQ_MOVE_PCNTXT) {
 +              if (!desc->chip->set_affinity(irq, cpumask)) {
 +                      cpumask_copy(desc->affinity, cpumask);
 +                      irq_set_thread_affinity(desc, cpumask);
 +              }
 +      }
        else {
                desc->status |= IRQ_MOVE_PENDING;
                cpumask_copy(desc->pending_mask, cpumask);
        }
  #else
 -      cpumask_copy(desc->affinity, cpumask);
 -      desc->chip->set_affinity(irq, cpumask);
 +      if (!desc->chip->set_affinity(irq, cpumask)) {
 +              cpumask_copy(desc->affinity, cpumask);
 +              irq_set_thread_affinity(desc, cpumask);
 +      }
  #endif
 -      irq_set_thread_affinity(desc, cpumask);
        desc->status |= IRQ_AFFINITY_SET;
        spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
@@@ -856,7 -851,7 +856,7 @@@ EXPORT_SYMBOL(free_irq)
   *    still called in hard interrupt context and has to check
   *    whether the interrupt originates from the device. If yes it
   *    needs to disable the interrupt on the device and return
-  *    IRQ_THREAD_WAKE which will wake up the handler thread and run
+  *    IRQ_WAKE_THREAD which will wake up the handler thread and run
   *    @thread_fn. This split handler design is necessary to support
   *    shared interrupts.
   *