
Linux/include/linux/interrupt.h

  1 /* interrupt.h */
  2 #ifndef _LINUX_INTERRUPT_H
  3 #define _LINUX_INTERRUPT_H
  4 
  5 #include <linux/kernel.h>
  6 #include <linux/linkage.h>
  7 #include <linux/bitops.h>
  8 #include <linux/preempt.h>
  9 #include <linux/cpumask.h>
 10 #include <linux/irqreturn.h>
 11 #include <linux/irqnr.h>
 12 #include <linux/hardirq.h>
 13 #include <linux/irqflags.h>
 14 #include <linux/hrtimer.h>
 15 #include <linux/kref.h>
 16 #include <linux/workqueue.h>
 17 
 18 #include <linux/atomic.h>
 19 #include <asm/ptrace.h>
 20 #include <asm/irq.h>
 21 
 22 /*
 23  * These correspond to the IORESOURCE_IRQ_* defines in
 24  * linux/ioport.h to select the interrupt line behaviour.  When
 25  * requesting an interrupt without specifying an IRQF_TRIGGER, the
 26  * setting should be assumed to be "as already configured", which
 27  * may be as per machine or firmware initialisation.
 28  */
 29 #define IRQF_TRIGGER_NONE       0x00000000
 30 #define IRQF_TRIGGER_RISING     0x00000001
 31 #define IRQF_TRIGGER_FALLING    0x00000002
 32 #define IRQF_TRIGGER_HIGH       0x00000004
 33 #define IRQF_TRIGGER_LOW        0x00000008
 34 #define IRQF_TRIGGER_MASK       (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
 35                                  IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
 36 #define IRQF_TRIGGER_PROBE      0x00000010
 37 
 38 /*
 39  * These flags are used only by the kernel as part of the
 40  * irq handling routines.
 41  *
 42  * IRQF_DISABLED - keep irqs disabled when calling the action handler.
 43  *                 DEPRECATED. This flag is a NOOP and is scheduled to be removed.
 44  * IRQF_SHARED - allow sharing the irq among several devices
 45  * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 46  * IRQF_TIMER - Flag to mark this interrupt as timer interrupt
 47  * IRQF_PERCPU - Interrupt is per cpu
 48  * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 49  * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 50  *                registered first in a shared interrupt is considered for
 51  *                performance reasons)
 52  * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has finished.
 53  *                Used by threaded interrupts which need to keep the
 54  *                irq line disabled until the threaded handler has been run.
 55  * IRQF_NO_SUSPEND - Do not disable this IRQ during suspend
 56  * IRQF_FORCE_RESUME - Force enable it on resume even if IRQF_NO_SUSPEND is set
 57  * IRQF_NO_THREAD - Interrupt cannot be threaded
 58  * IRQF_EARLY_RESUME - Resume IRQ early during syscore instead of at device
 59  *                resume time.
 60  */
 61 #define IRQF_DISABLED           0x00000020
 62 #define IRQF_SHARED             0x00000080
 63 #define IRQF_PROBE_SHARED       0x00000100
 64 #define __IRQF_TIMER            0x00000200
 65 #define IRQF_PERCPU             0x00000400
 66 #define IRQF_NOBALANCING        0x00000800
 67 #define IRQF_IRQPOLL            0x00001000
 68 #define IRQF_ONESHOT            0x00002000
 69 #define IRQF_NO_SUSPEND         0x00004000
 70 #define IRQF_FORCE_RESUME       0x00008000
 71 #define IRQF_NO_THREAD          0x00010000
 72 #define IRQF_EARLY_RESUME       0x00020000
 73 
 74 #define IRQF_TIMER              (__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 75 
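/*
 * Illustrative sketch (FOO_IRQ_FLAGS is a hypothetical name): a device on
 * a shared, active-low interrupt line would typically combine sharing and
 * trigger flags like this when calling request_irq().  Passing no
 * IRQF_TRIGGER_* bits keeps whatever trigger the platform or firmware
 * already configured.
 */
#define FOO_IRQ_FLAGS   (IRQF_SHARED | IRQF_TRIGGER_LOW)
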
 76 /*
 77  * These values can be returned by request_any_context_irq() and
 78  * describe the context the interrupt will be run in.
 79  *
 80  * IRQC_IS_HARDIRQ - interrupt runs in hardirq context
 81  * IRQC_IS_NESTED - interrupt runs in a nested threaded context
 82  */
 83 enum {
 84         IRQC_IS_HARDIRQ = 0,
 85         IRQC_IS_NESTED,
 86 };
 87 
 88 typedef irqreturn_t (*irq_handler_t)(int, void *);
 89 
 90 /**
 91  * struct irqaction - per interrupt action descriptor
 92  * @handler:    interrupt handler function
 93  * @name:       name of the device
 94  * @dev_id:     cookie to identify the device
 95  * @percpu_dev_id:      per-CPU cookie to identify the device
 96  * @next:       pointer to the next irqaction for shared interrupts
 97  * @irq:        interrupt number
 98  * @flags:      flags (see IRQF_* above)
 99  * @thread_fn:  interrupt handler function for threaded interrupts
100  * @thread:     thread pointer for threaded interrupts
101  * @thread_flags:       flags related to @thread
102  * @thread_mask:        bitmask for keeping track of @thread activity
103  * @dir:        pointer to the proc/irq/NN/name entry
104  */
105 struct irqaction {
106         irq_handler_t           handler;
107         void                    *dev_id;
108         void __percpu           *percpu_dev_id;
109         struct irqaction        *next;
110         irq_handler_t           thread_fn;
111         struct task_struct      *thread;
112         unsigned int            irq;
113         unsigned int            flags;
114         unsigned long           thread_flags;
115         unsigned long           thread_mask;
116         const char              *name;
117         struct proc_dir_entry   *dir;
118 } ____cacheline_internodealigned_in_smp;
119 
120 extern irqreturn_t no_action(int cpl, void *dev_id);
121 
122 extern int __must_check
123 request_threaded_irq(unsigned int irq, irq_handler_t handler,
124                      irq_handler_t thread_fn,
125                      unsigned long flags, const char *name, void *dev);
126 
127 static inline int __must_check
128 request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
129             const char *name, void *dev)
130 {
131         return request_threaded_irq(irq, handler, NULL, flags, name, dev);
132 }
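
/*
 * Illustrative sketch of a threaded-interrupt setup; all "foo" names
 * (struct foo_device, foo_pending(), foo_mask_events(),
 * foo_process_events()) are hypothetical.  The primary handler does the
 * minimum non-sleeping work and returns IRQ_WAKE_THREAD; the thread
 * function then runs in process context with the line kept masked
 * because of IRQF_ONESHOT.  A device that needs no threaded part would
 * simply use request_irq() with only a primary handler.
 */
static irqreturn_t foo_quick_check(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        if (!foo_pending(foo))          /* hypothetical status-register check */
                return IRQ_NONE;        /* not ours / spurious */
        foo_mask_events(foo);           /* quick, non-sleeping work only */
        return IRQ_WAKE_THREAD;         /* run foo_thread_fn() next */
}

static irqreturn_t foo_thread_fn(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        foo_process_events(foo);        /* may sleep: runs in a kernel thread */
        return IRQ_HANDLED;
}

static int foo_setup_irq(struct foo_device *foo)
{
        /* free_irq(foo->irq, foo) releases this at teardown */
        return request_threaded_irq(foo->irq, foo_quick_check, foo_thread_fn,
                                    IRQF_ONESHOT, "foo", foo);
}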
133 
134 extern int __must_check
135 request_any_context_irq(unsigned int irq, irq_handler_t handler,
136                         unsigned long flags, const char *name, void *dev_id);
137 
138 extern int __must_check
139 request_percpu_irq(unsigned int irq, irq_handler_t handler,
140                    const char *devname, void __percpu *percpu_dev_id);
141 
142 extern void free_irq(unsigned int, void *);
143 extern void free_percpu_irq(unsigned int, void __percpu *);
144 
145 struct device;
146 
147 extern int __must_check
148 devm_request_threaded_irq(struct device *dev, unsigned int irq,
149                           irq_handler_t handler, irq_handler_t thread_fn,
150                           unsigned long irqflags, const char *devname,
151                           void *dev_id);
152 
153 static inline int __must_check
154 devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
155                  unsigned long irqflags, const char *devname, void *dev_id)
156 {
157         return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
158                                          devname, dev_id);
159 }
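
/*
 * Illustrative sketch (hypothetical "foo" driver, reusing the handlers
 * sketched above): a device-managed request made from probe().  The IRQ
 * is freed automatically when the driver is unbound, so no explicit
 * free_irq()/devm_free_irq() is needed on the error or remove paths.
 */
static int foo_probe_irq(struct device *dev, struct foo_device *foo)
{
        int ret;

        ret = devm_request_threaded_irq(dev, foo->irq, foo_quick_check,
                                        foo_thread_fn, IRQF_ONESHOT,
                                        "foo", foo);
        if (ret)
                dev_err(dev, "failed to request irq %u: %d\n", foo->irq, ret);
        return ret;
}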
160 
161 extern int __must_check
162 devm_request_any_context_irq(struct device *dev, unsigned int irq,
163                  irq_handler_t handler, unsigned long irqflags,
164                  const char *devname, void *dev_id);
165 
166 extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
167 
168 /*
169  * On lockdep we don't want to enable hardirqs in hardirq
170  * context. Use local_irq_enable_in_hardirq() to annotate
171  * kernel code that has to do this nevertheless (pretty much
172  * the only valid case is for old/broken hardware that is
173  * insanely slow).
174  *
175  * NOTE: in theory this might break fragile code that relies
176  * on hardirq delivery - in practice we don't seem to have such
177  * places left. So the only effect should be slightly increased
178  * irqs-off latencies.
179  */
180 #ifdef CONFIG_LOCKDEP
181 # define local_irq_enable_in_hardirq()  do { } while (0)
182 #else
183 # define local_irq_enable_in_hardirq()  local_irq_enable()
184 #endif
185 
186 extern void disable_irq_nosync(unsigned int irq);
187 extern void disable_irq(unsigned int irq);
188 extern void disable_percpu_irq(unsigned int irq);
189 extern void enable_irq(unsigned int irq);
190 extern void enable_percpu_irq(unsigned int irq, unsigned int type);
191 extern void irq_wake_thread(unsigned int irq, void *dev_id);
192 
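/*
 * Illustrative sketch of the per-CPU interrupt API; struct foo_pcpu and
 * foo_percpu_handler() are hypothetical.  A per-CPU IRQ is requested once
 * with a __percpu cookie and then enabled on each CPU individually
 * (typically from code running on, or hotplug callbacks for, that CPU).
 * IRQ_TYPE_NONE comes from <linux/irq.h>.
 */
static DEFINE_PER_CPU(struct foo_pcpu, foo_pcpu_data);

static int foo_setup_percpu_irq(unsigned int irq)
{
        int ret;

        ret = request_percpu_irq(irq, foo_percpu_handler, "foo",
                                 &foo_pcpu_data);
        if (ret)
                return ret;

        /* on each CPU that should receive the interrupt: */
        enable_percpu_irq(irq, IRQ_TYPE_NONE);
        return 0;
}
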
193 /* The following three functions are for the core kernel use only. */
194 extern void suspend_device_irqs(void);
195 extern void resume_device_irqs(void);
196 #ifdef CONFIG_PM_SLEEP
197 extern int check_wakeup_irqs(void);
198 #else
199 static inline int check_wakeup_irqs(void) { return 0; }
200 #endif
201 
202 #if defined(CONFIG_SMP)
203 
204 extern cpumask_var_t irq_default_affinity;
205 
206 /* Internal implementation. Use the helpers below */
207 extern int __irq_set_affinity(unsigned int irq, const struct cpumask *cpumask,
208                               bool force);
209 
210 /**
211  * irq_set_affinity - Set the irq affinity of a given irq
212  * @irq:        Interrupt to set affinity
213  * @cpumask:    cpumask
214  *
215  * Fails if cpumask does not contain an online CPU
216  */
217 static inline int
218 irq_set_affinity(unsigned int irq, const struct cpumask *cpumask)
219 {
220         return __irq_set_affinity(irq, cpumask, false);
221 }
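
/*
 * Illustrative sketch: pinning an interrupt to one CPU, e.g. the CPU that
 * consumes the data.  The irq/cpu values are up to the caller; many
 * drivers instead publish a preference with irq_set_affinity_hint()
 * (below) and leave the final placement to user space.
 */
static int foo_pin_irq(unsigned int irq, unsigned int cpu)
{
        if (!irq_can_set_affinity(irq))         /* declared below */
                return -EINVAL;
        return irq_set_affinity(irq, cpumask_of(cpu));
}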
222 
223 /**
224  * irq_force_affinity - Force the irq affinity of a given irq
225  * @irq:        Interrupt to set affinity
226  * @cpumask:    cpumask
227  *
228  * Same as irq_set_affinity, but without checking the mask against
229  * online CPUs.
230  *
231  * Solely for low-level CPU hotplug code, where we need to make
232  * per-CPU interrupts affine before the CPU comes online.
233  */
234 static inline int
235 irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
236 {
237         return __irq_set_affinity(irq, cpumask, true);
238 }
239 
240 extern int irq_can_set_affinity(unsigned int irq);
241 extern int irq_select_affinity(unsigned int irq);
242 
243 extern int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m);
244 
245 /**
246  * struct irq_affinity_notify - context for notification of IRQ affinity changes
247  * @irq:                Interrupt to which notification applies
248  * @kref:               Reference count, for internal use
249  * @work:               Work item, for internal use
250  * @notify:             Function to be called on change.  This will be
251  *                      called in process context.
252  * @release:            Function to be called on release.  This will be
253  *                      called in process context.  Once registered, the
254  *                      structure must only be freed when this function is
255  *                      called or later.
256  */
257 struct irq_affinity_notify {
258         unsigned int irq;
259         struct kref kref;
260         struct work_struct work;
261         void (*notify)(struct irq_affinity_notify *, const cpumask_t *mask);
262         void (*release)(struct kref *ref);
263 };
264 
265 extern int
266 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify);
267 
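/*
 * Illustrative sketch of an affinity notifier; struct foo_queue and
 * foo_queue_retarget() are hypothetical.  notify() runs in process
 * context whenever the IRQ affinity changes; release() marks the point
 * after which the structure may be freed.
 */
static void foo_affinity_notify(struct irq_affinity_notify *notify,
                                const cpumask_t *mask)
{
        struct foo_queue *q = container_of(notify, struct foo_queue,
                                           affinity_notify);

        foo_queue_retarget(q, mask);            /* move per-queue state */
}

static void foo_affinity_release(struct kref *ref)
{
        /* nothing extra to free in this sketch */
}

static int foo_watch_affinity(struct foo_queue *q, unsigned int irq)
{
        q->affinity_notify.notify = foo_affinity_notify;
        q->affinity_notify.release = foo_affinity_release;
        return irq_set_affinity_notifier(irq, &q->affinity_notify);
}
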
268 #else /* CONFIG_SMP */
269 
270 static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
271 {
272         return -EINVAL;
273 }
274 
275 static inline int irq_force_affinity(unsigned int irq, const struct cpumask *cpumask)
276 {
277         return 0;
278 }
279 
280 static inline int irq_can_set_affinity(unsigned int irq)
281 {
282         return 0;
283 }
284 
285 static inline int irq_select_affinity(unsigned int irq)  { return 0; }
286 
287 static inline int irq_set_affinity_hint(unsigned int irq,
288                                         const struct cpumask *m)
289 {
290         return -EINVAL;
291 }
292 #endif /* CONFIG_SMP */
293 
294 /*
295  * Special lockdep variants of irq disabling/enabling.
296  * These should be used for locking constructs that
297  * know that a particular irq context is disabled,
298  * and that it is the only irq-context user of a lock,
299  * so that it is safe to take the lock in the irq-disabled
300  * section without disabling hardirqs.
301  *
302  * On !CONFIG_LOCKDEP they are equivalent to the normal
303  * irq disable/enable methods.
304  */
305 static inline void disable_irq_nosync_lockdep(unsigned int irq)
306 {
307         disable_irq_nosync(irq);
308 #ifdef CONFIG_LOCKDEP
309         local_irq_disable();
310 #endif
311 }
312 
313 static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
314 {
315         disable_irq_nosync(irq);
316 #ifdef CONFIG_LOCKDEP
317         local_irq_save(*flags);
318 #endif
319 }
320 
321 static inline void disable_irq_lockdep(unsigned int irq)
322 {
323         disable_irq(irq);
324 #ifdef CONFIG_LOCKDEP
325         local_irq_disable();
326 #endif
327 }
328 
329 static inline void enable_irq_lockdep(unsigned int irq)
330 {
331 #ifdef CONFIG_LOCKDEP
332         local_irq_enable();
333 #endif
334         enable_irq(irq);
335 }
336 
337 static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
338 {
339 #ifdef CONFIG_LOCKDEP
340         local_irq_restore(*flags);
341 #endif
342         enable_irq(irq);
343 }
344 
345 /* IRQ wakeup (PM) control: */
346 extern int irq_set_irq_wake(unsigned int irq, unsigned int on);
347 
348 static inline int enable_irq_wake(unsigned int irq)
349 {
350         return irq_set_irq_wake(irq, 1);
351 }
352 
353 static inline int disable_irq_wake(unsigned int irq)
354 {
355         return irq_set_irq_wake(irq, 0);
356 }
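
/*
 * Illustrative sketch (hypothetical "foo" driver): arming the device's
 * interrupt as a system wakeup source from the suspend path and
 * disarming it symmetrically on resume.
 */
static int foo_suspend(struct foo_device *foo)
{
        if (foo->can_wake)
                enable_irq_wake(foo->irq);
        return 0;
}

static int foo_resume(struct foo_device *foo)
{
        if (foo->can_wake)
                disable_irq_wake(foo->irq);
        return 0;
}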
357 
358 
359 #ifdef CONFIG_IRQ_FORCED_THREADING
360 extern bool force_irqthreads;
361 #else
362 #define force_irqthreads        (0)
363 #endif
364 
365 #ifndef __ARCH_SET_SOFTIRQ_PENDING
366 #define set_softirq_pending(x) (local_softirq_pending() = (x))
367 #define or_softirq_pending(x)  (local_softirq_pending() |= (x))
368 #endif
369 
370 /* Some architectures might implement lazy enabling/disabling of
371  * interrupts. In some cases, such as stop_machine, we might want
372  * to ensure that after a local_irq_disable(), interrupts have
373  * really been disabled in hardware. Such architectures need to
374  * implement the following hook.
375  */
376 #ifndef hard_irq_disable
377 #define hard_irq_disable()      do { } while(0)
378 #endif
379 
380 /* Please avoid allocating new softirqs unless you really need
381    high-frequency threaded job scheduling.  For almost all purposes
382    tasklets are more than enough; e.g. all serial device BHs et
383    al. should be converted to tasklets, not softirqs.
384  */
385 
386 enum
387 {
388         HI_SOFTIRQ=0,
389         TIMER_SOFTIRQ,
390         NET_TX_SOFTIRQ,
391         NET_RX_SOFTIRQ,
392         BLOCK_SOFTIRQ,
393         BLOCK_IOPOLL_SOFTIRQ,
394         TASKLET_SOFTIRQ,
395         SCHED_SOFTIRQ,
396         HRTIMER_SOFTIRQ,
397         RCU_SOFTIRQ,    /* Preferably, RCU should always be the last softirq */
398 
399         NR_SOFTIRQS
400 };
401 
402 #define SOFTIRQ_STOP_IDLE_MASK (~(1 << RCU_SOFTIRQ))
403 
404 /* map softirq index to softirq name. update 'softirq_to_name' in
405  * kernel/softirq.c when adding a new softirq.
406  */
407 extern const char * const softirq_to_name[NR_SOFTIRQS];
408 
409 /* The softirq mask and active fields were moved to irq_cpustat_t in
410  * asm/hardirq.h to get better cache usage.  KAO
411  */
412 
413 struct softirq_action
414 {
415         void    (*action)(struct softirq_action *);
416 };
417 
418 asmlinkage void do_softirq(void);
419 asmlinkage void __do_softirq(void);
420 
421 #ifdef __ARCH_HAS_DO_SOFTIRQ
422 void do_softirq_own_stack(void);
423 #else
424 static inline void do_softirq_own_stack(void)
425 {
426         __do_softirq();
427 }
428 #endif
429 
430 extern void open_softirq(int nr, void (*action)(struct softirq_action *));
431 extern void softirq_init(void);
432 extern void __raise_softirq_irqoff(unsigned int nr);
433 
434 extern void raise_softirq_irqoff(unsigned int nr);
435 extern void raise_softirq(unsigned int nr);
436 
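/*
 * Illustrative sketch only: FOO_SOFTIRQ and foo_drain_queue() are
 * hypothetical, and a real FOO_SOFTIRQ would have to be added to the enum
 * above and to softirq_to_name[] -- which, per the comment above, should
 * almost always be avoided in favour of a tasklet.  The pattern is:
 * register an action once at init time, then raise the softirq (usually
 * from an interrupt handler) to have the action run soon afterwards.
 */
static void foo_softirq_action(struct softirq_action *h)
{
        foo_drain_queue();              /* softirq context: must not sleep */
}

static void foo_softirq_init(void)
{
        open_softirq(FOO_SOFTIRQ, foo_softirq_action);
}

/* later, e.g. from a hardirq handler:  raise_softirq(FOO_SOFTIRQ); */
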
437 DECLARE_PER_CPU(struct task_struct *, ksoftirqd);
438 
439 static inline struct task_struct *this_cpu_ksoftirqd(void)
440 {
441         return this_cpu_read(ksoftirqd);
442 }
443 
444 /* Tasklets --- multithreaded analogue of BHs.
445 
446    Main feature distinguishing them from generic softirqs: a given
447    tasklet runs on only one CPU at a time.
448 
449    Main feature distinguishing them from BHs: different tasklets
450    may run simultaneously on different CPUs.
451 
452    Properties:
453    * If tasklet_schedule() is called, the tasklet is guaranteed
454      to be executed on some CPU at least once afterwards.
455    * If the tasklet is already scheduled but its execution has not yet
456      started, it will be executed only once.
457    * If the tasklet is already running on another CPU (or schedule is called
458      from the tasklet itself), it is rescheduled for later.
459    * A tasklet is strictly serialized with respect to itself, but not
460      with respect to other tasklets.  If the client needs inter-tasklet
461      synchronization, it must provide it, e.g. with spinlocks.
462  */
463 
464 struct tasklet_struct
465 {
466         struct tasklet_struct *next;
467         unsigned long state;
468         atomic_t count;
469         void (*func)(unsigned long);
470         unsigned long data;
471 };
472 
473 #define DECLARE_TASKLET(name, func, data) \
474 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }
475 
476 #define DECLARE_TASKLET_DISABLED(name, func, data) \
477 struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }
478 
479 
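/*
 * Illustrative sketch of the classic top-half/bottom-half split; all
 * "foo" names are hypothetical.  The hard interrupt handler acknowledges
 * the device and schedules the tasklet; the tasklet then does the
 * heavier, still non-sleeping, processing.  DECLARE_TASKLET() above
 * covers the static case; this sketch binds the device pointer at init
 * time with tasklet_init() (declared further down), and the teardown
 * path would call tasklet_kill(&foo->rx_tasklet).
 */
static void foo_rx_tasklet_fn(unsigned long data)
{
        struct foo_device *foo = (struct foo_device *)data;

        foo_process_rx(foo);            /* must not sleep */
}

static void foo_init_bottom_half(struct foo_device *foo)
{
        tasklet_init(&foo->rx_tasklet, foo_rx_tasklet_fn, (unsigned long)foo);
}

static irqreturn_t foo_isr(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        foo_ack_irq(foo);               /* hypothetical device acknowledge */
        tasklet_schedule(&foo->rx_tasklet);     /* defined just below */
        return IRQ_HANDLED;
}
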
480 enum
481 {
482         TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
483         TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
484 };
485 
486 #ifdef CONFIG_SMP
487 static inline int tasklet_trylock(struct tasklet_struct *t)
488 {
489         return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
490 }
491 
492 static inline void tasklet_unlock(struct tasklet_struct *t)
493 {
494         smp_mb__before_clear_bit(); 
495         clear_bit(TASKLET_STATE_RUN, &(t)->state);
496 }
497 
498 static inline void tasklet_unlock_wait(struct tasklet_struct *t)
499 {
500         while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
501 }
502 #else
503 #define tasklet_trylock(t) 1
504 #define tasklet_unlock_wait(t) do { } while (0)
505 #define tasklet_unlock(t) do { } while (0)
506 #endif
507 
508 extern void __tasklet_schedule(struct tasklet_struct *t);
509 
510 static inline void tasklet_schedule(struct tasklet_struct *t)
511 {
512         if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
513                 __tasklet_schedule(t);
514 }
515 
516 extern void __tasklet_hi_schedule(struct tasklet_struct *t);
517 
518 static inline void tasklet_hi_schedule(struct tasklet_struct *t)
519 {
520         if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
521                 __tasklet_hi_schedule(t);
522 }
523 
524 extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);
525 
526 /*
527  * This version avoids touching any other tasklets. Needed for kmemcheck
528  * in order not to take any page faults while enqueueing this tasklet;
529  * consider VERY carefully whether you really need this or
530  * tasklet_hi_schedule()...
531  */
532 static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
533 {
534         if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
535                 __tasklet_hi_schedule_first(t);
536 }
537 
538 
539 static inline void tasklet_disable_nosync(struct tasklet_struct *t)
540 {
541         atomic_inc(&t->count);
542         smp_mb__after_atomic_inc();
543 }
544 
545 static inline void tasklet_disable(struct tasklet_struct *t)
546 {
547         tasklet_disable_nosync(t);
548         tasklet_unlock_wait(t);
549         smp_mb();
550 }
551 
552 static inline void tasklet_enable(struct tasklet_struct *t)
553 {
554         smp_mb__before_atomic_dec();
555         atomic_dec(&t->count);
556 }
557 
558 static inline void tasklet_hi_enable(struct tasklet_struct *t)
559 {
560         smp_mb__before_atomic_dec();
561         atomic_dec(&t->count);
562 }
563 
564 extern void tasklet_kill(struct tasklet_struct *t);
565 extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
566 extern void tasklet_init(struct tasklet_struct *t,
567                          void (*func)(unsigned long), unsigned long data);
568 
569 struct tasklet_hrtimer {
570         struct hrtimer          timer;
571         struct tasklet_struct   tasklet;
572         enum hrtimer_restart    (*function)(struct hrtimer *);
573 };
574 
575 extern void
576 tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
577                      enum hrtimer_restart (*function)(struct hrtimer *),
578                      clockid_t which_clock, enum hrtimer_mode mode);
579 
580 static inline
581 int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
582                           const enum hrtimer_mode mode)
583 {
584         return hrtimer_start(&ttimer->timer, time, mode);
585 }
586 
587 static inline
588 void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
589 {
590         hrtimer_cancel(&ttimer->timer);
591         tasklet_kill(&ttimer->tasklet);
592 }
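
/*
 * Illustrative sketch (hypothetical "foo" names): an hrtimer whose
 * expiry work runs in tasklet (softirq) context instead of hardirq
 * context.  ms_to_ktime() is from <linux/ktime.h>; a return value other
 * than HRTIMER_NORESTART makes the core restart the timer.
 */
static enum hrtimer_restart foo_poll_timer_fn(struct hrtimer *timer)
{
        struct tasklet_hrtimer *ttimer =
                container_of(timer, struct tasklet_hrtimer, timer);
        struct foo_device *foo =
                container_of(ttimer, struct foo_device, poll_timer);

        foo_poll(foo);                          /* softirq context */
        return HRTIMER_NORESTART;
}

static void foo_start_poll(struct foo_device *foo)
{
        tasklet_hrtimer_init(&foo->poll_timer, foo_poll_timer_fn,
                             CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        tasklet_hrtimer_start(&foo->poll_timer, ms_to_ktime(10),
                              HRTIMER_MODE_REL);
        /* teardown would call tasklet_hrtimer_cancel(&foo->poll_timer) */
}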
593 
594 /*
595  * Autoprobing for irqs:
596  *
597  * probe_irq_on() and probe_irq_off() provide robust primitives
598  * for accurate IRQ probing during kernel initialization.  They are
599  * reasonably simple to use, are not "fooled" by spurious interrupts,
600  * and, unlike other attempts at IRQ probing, they do not get hung on
601  * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
602  *
603  * For reasonably foolproof probing, use them as follows:
604  *
605  * 1. clear and/or mask the device's internal interrupt.
606  * 2. sti();
607  * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
608  * 4. enable the device and cause it to trigger an interrupt.
609  * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
610  * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
611  * 7. service the device to clear its pending interrupt.
612  * 8. loop again if paranoia is required.
613  *
614  * probe_irq_on() returns a mask of allocated irqs.
615  *
616  * probe_irq_off() takes the mask as a parameter,
617  * and returns the irq number which occurred,
618  * or zero if none occurred, or a negative irq number
619  * if more than one irq occurred.
620  */
621 
622 #if !defined(CONFIG_GENERIC_IRQ_PROBE) 
623 static inline unsigned long probe_irq_on(void)
624 {
625         return 0;
626 }
627 static inline int probe_irq_off(unsigned long val)
628 {
629         return 0;
630 }
631 static inline unsigned int probe_irq_mask(unsigned long val)
632 {
633         return 0;
634 }
635 #else
636 extern unsigned long probe_irq_on(void);        /* returns 0 on failure */
637 extern int probe_irq_off(unsigned long);        /* returns 0 or negative on failure */
638 extern unsigned int probe_irq_mask(unsigned long);      /* returns mask of ISA interrupts */
639 #endif
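
/*
 * Illustrative sketch of the probing sequence described in the comment
 * above, for a hypothetical legacy device "foo" whose IRQ line is not
 * known in advance; mdelay() is from <linux/delay.h> and the foo_*
 * helpers are made up.
 */
static int foo_find_irq(struct foo_device *foo)
{
        unsigned long mask;
        int irq;

        foo_quiesce_irq(foo);           /* step 1: mask the device's irq */
        mask = probe_irq_on();          /* step 3: take over idle IRQs */
        foo_fire_test_irq(foo);         /* step 4: make the device interrupt */
        mdelay(20);                     /* step 5: give it time to arrive */
        irq = probe_irq_off(mask);      /* step 6: 0 = none, <0 = several */
        foo_ack_irq(foo);               /* step 7: clear the pending irq */

        if (irq <= 0)
                return -ENODEV;
        foo->irq = irq;
        return 0;
}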
640 
641 #ifdef CONFIG_PROC_FS
642 /* Initialize /proc/irq/ */
643 extern void init_irq_proc(void);
644 #else
645 static inline void init_irq_proc(void)
646 {
647 }
648 #endif
649 
650 struct seq_file;
651 int show_interrupts(struct seq_file *p, void *v);
652 int arch_show_interrupts(struct seq_file *p, int prec);
653 
654 extern int early_irq_init(void);
655 extern int arch_probe_nr_irqs(void);
656 extern int arch_early_irq_init(void);
657 
658 #endif
659 
