
Linux/kernel/irq/manage.c

  1 /*
  2  * linux/kernel/irq/manage.c
  3  *
  4  * Copyright (C) 1992, 1998-2006 Linus Torvalds, Ingo Molnar
  5  * Copyright (C) 2005-2006 Thomas Gleixner
  6  *
  7  * This file contains driver APIs to the irq subsystem.
  8  */
  9 
 10 #define pr_fmt(fmt) "genirq: " fmt
 11 
 12 #include <linux/irq.h>
 13 #include <linux/kthread.h>
 14 #include <linux/module.h>
 15 #include <linux/random.h>
 16 #include <linux/interrupt.h>
 17 #include <linux/slab.h>
 18 #include <linux/sched.h>
 19 #include <linux/sched/rt.h>
 20 #include <linux/task_work.h>
 21 
 22 #include "internals.h"
 23 
 24 #ifdef CONFIG_IRQ_FORCED_THREADING
 25 __read_mostly bool force_irqthreads;
 26 
 27 static int __init setup_forced_irqthreads(char *arg)
 28 {
 29         force_irqthreads = true;
 30         return 0;
 31 }
 32 early_param("threadirqs", setup_forced_irqthreads);
 33 #endif
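
For reference, forced threading is controlled entirely from the kernel
command line: booting with the "threadirqs" parameter registered above
flips force_irqthreads before any interrupt is requested. A bootloader
kernel line might look like this (the image path and the other
parameters are hypothetical):

    linux /boot/vmlinuz root=/dev/sda1 quiet threadirqs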
 34 
 35 /**
 36  *      synchronize_irq - wait for pending IRQ handlers (on other CPUs)
 37  *      @irq: interrupt number to wait for
 38  *
 39  *      This function waits for any pending IRQ handlers for this interrupt
 40  *      to complete before returning. If you use this function while
 41  *      holding a resource the IRQ handler may need you will deadlock.
 42  *
 43  *      This function may be called - with care - from IRQ context.
 44  */
 45 void synchronize_irq(unsigned int irq)
 46 {
 47         struct irq_desc *desc = irq_to_desc(irq);
 48         bool inprogress;
 49 
 50         if (!desc)
 51                 return;
 52 
 53         do {
 54                 unsigned long flags;
 55 
 56                 /*
 57                  * Wait until we're out of the critical section.  This might
 58                  * give the wrong answer due to the lack of memory barriers.
 59                  */
 60                 while (irqd_irq_inprogress(&desc->irq_data))
 61                         cpu_relax();
 62 
 63                 /* Ok, that indicated we're done: double-check carefully. */
 64                 raw_spin_lock_irqsave(&desc->lock, flags);
 65                 inprogress = irqd_irq_inprogress(&desc->irq_data);
 66                 raw_spin_unlock_irqrestore(&desc->lock, flags);
 67 
 68                 /* Oops, that failed? */
 69         } while (inprogress);
 70 
 71         /*
 72          * We made sure that no hardirq handler is running. Now verify
 73          * that no threaded handlers are active.
 74          */
 75         wait_event(desc->wait_for_threads, !atomic_read(&desc->threads_active));
 76 }
 77 EXPORT_SYMBOL(synchronize_irq);
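
As a usage sketch (not part of this file): a typical driver teardown
quiesces the device first and only then frees what its handler touches.
The mydev structure, register offset and fields below are hypothetical;
per the warning above, no lock the handler might take may be held across
this call.

    static void mydev_shutdown(struct mydev *dev)
    {
            /* hypothetical register write: stop the device raising irqs */
            writel(0, dev->regs + MYDEV_IRQ_ENABLE);
            /* wait for handlers already running on other CPUs */
            synchronize_irq(dev->irq);
            /* now safe to free data the handler dereferences */
            kfree(dev->rx_ring);
    }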
 78 
 79 #ifdef CONFIG_SMP
 80 cpumask_var_t irq_default_affinity;
 81 
 82 /**
 83  *      irq_can_set_affinity - Check if the affinity of a given irq can be set
 84  *      @irq:           Interrupt to check
 85  *
 86  */
 87 int irq_can_set_affinity(unsigned int irq)
 88 {
 89         struct irq_desc *desc = irq_to_desc(irq);
 90 
 91         if (!desc || !irqd_can_balance(&desc->irq_data) ||
 92             !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
 93                 return 0;
 94 
 95         return 1;
 96 }
 97 
 98 /**
 99  *      irq_set_thread_affinity - Notify irq threads to adjust affinity
100  *      @desc:          irq descriptor which has affinity changed
101  *
102  *      We just set IRQTF_AFFINITY and delegate the affinity setting
103  *      to the interrupt thread itself. We cannot call
104  *      set_cpus_allowed_ptr() here as we hold desc->lock and this
105  *      code can be called from hard interrupt context.
106  */
107 void irq_set_thread_affinity(struct irq_desc *desc)
108 {
109         struct irqaction *action = desc->action;
110 
111         while (action) {
112                 if (action->thread)
113                         set_bit(IRQTF_AFFINITY, &action->thread_flags);
114                 action = action->next;
115         }
116 }
117 
118 #ifdef CONFIG_GENERIC_PENDING_IRQ
119 static inline bool irq_can_move_pcntxt(struct irq_data *data)
120 {
121         return irqd_can_move_in_process_context(data);
122 }
123 static inline bool irq_move_pending(struct irq_data *data)
124 {
125         return irqd_is_setaffinity_pending(data);
126 }
127 static inline void
128 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
129 {
130         cpumask_copy(desc->pending_mask, mask);
131 }
132 static inline void
133 irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
134 {
135         cpumask_copy(mask, desc->pending_mask);
136 }
137 #else
138 static inline bool irq_can_move_pcntxt(struct irq_data *data) { return true; }
139 static inline bool irq_move_pending(struct irq_data *data) { return false; }
140 static inline void
141 irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask) { }
142 static inline void
143 irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
144 #endif
145 
146 int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
147                         bool force)
148 {
149         struct irq_desc *desc = irq_data_to_desc(data);
150         struct irq_chip *chip = irq_data_get_irq_chip(data);
151         int ret;
152 
153         ret = chip->irq_set_affinity(data, mask, false);
154         switch (ret) {
155         case IRQ_SET_MASK_OK:
156                 cpumask_copy(data->affinity, mask); /* fall through */
157         case IRQ_SET_MASK_OK_NOCOPY:
158                 irq_set_thread_affinity(desc);
159                 ret = 0;
160         }
161 
162         return ret;
163 }
164 
165 int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
166 {
167         struct irq_chip *chip = irq_data_get_irq_chip(data);
168         struct irq_desc *desc = irq_data_to_desc(data);
169         int ret = 0;
170 
171         if (!chip || !chip->irq_set_affinity)
172                 return -EINVAL;
173 
174         if (irq_can_move_pcntxt(data)) {
175                 ret = irq_do_set_affinity(data, mask, false);
176         } else {
177                 irqd_set_move_pending(data);
178                 irq_copy_pending(desc, mask);
179         }
180 
181         if (desc->affinity_notify) {
182                 kref_get(&desc->affinity_notify->kref);
183                 schedule_work(&desc->affinity_notify->work);
184         }
185         irqd_set(data, IRQD_AFFINITY_SET);
186 
187         return ret;
188 }
189 
190 /**
191  *      irq_set_affinity - Set the irq affinity of a given irq
192  *      @irq:           Interrupt to set affinity
193  *      @mask:          cpumask to route the interrupt to
194  *
195  */
196 int irq_set_affinity(unsigned int irq, const struct cpumask *mask)
197 {
198         struct irq_desc *desc = irq_to_desc(irq);
199         unsigned long flags;
200         int ret;
201 
202         if (!desc)
203                 return -EINVAL;
204 
205         raw_spin_lock_irqsave(&desc->lock, flags);
206         ret = __irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask);
207         raw_spin_unlock_irqrestore(&desc->lock, flags);
208         return ret;
209 }
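
A minimal usage sketch, with hypothetical irq and CPU numbers; writing
/proc/irq/*/smp_affinity from userspace ends up on this same path:

    int err = irq_set_affinity(42, cpumask_of(3));  /* move irq 42 to CPU 3 */

    if (err)
            pr_warn("cannot set affinity of irq 42: %d\n", err);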
210 
211 int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
212 {
213         unsigned long flags;
214         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
215 
216         if (!desc)
217                 return -EINVAL;
218         desc->affinity_hint = m;
219         irq_put_desc_unlock(desc, flags);
220         return 0;
221 }
222 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
223 
224 static void irq_affinity_notify(struct work_struct *work)
225 {
226         struct irq_affinity_notify *notify =
227                 container_of(work, struct irq_affinity_notify, work);
228         struct irq_desc *desc = irq_to_desc(notify->irq);
229         cpumask_var_t cpumask;
230         unsigned long flags;
231 
232         if (!desc || !alloc_cpumask_var(&cpumask, GFP_KERNEL))
233                 goto out;
234 
235         raw_spin_lock_irqsave(&desc->lock, flags);
236         if (irq_move_pending(&desc->irq_data))
237                 irq_get_pending(cpumask, desc);
238         else
239                 cpumask_copy(cpumask, desc->irq_data.affinity);
240         raw_spin_unlock_irqrestore(&desc->lock, flags);
241 
242         notify->notify(notify, cpumask);
243 
244         free_cpumask_var(cpumask);
245 out:
246         kref_put(&notify->kref, notify->release);
247 }
248 
249 /**
250  *      irq_set_affinity_notifier - control notification of IRQ affinity changes
251  *      @irq:           Interrupt for which to enable/disable notification
252  *      @notify:        Context for notification, or %NULL to disable
253  *                      notification.  Function pointers must be initialised;
254  *                      the other fields will be initialised by this function.
255  *
256  *      Must be called in process context.  Notification may only be enabled
257  *      after the IRQ is allocated and must be disabled before the IRQ is
258  *      freed using free_irq().
259  */
260 int
261 irq_set_affinity_notifier(unsigned int irq, struct irq_affinity_notify *notify)
262 {
263         struct irq_desc *desc = irq_to_desc(irq);
264         struct irq_affinity_notify *old_notify;
265         unsigned long flags;
266 
267         /* The release function is promised process context */
268         might_sleep();
269 
270         if (!desc)
271                 return -EINVAL;
272 
273         /* Complete initialisation of *notify */
274         if (notify) {
275                 notify->irq = irq;
276                 kref_init(&notify->kref);
277                 INIT_WORK(&notify->work, irq_affinity_notify);
278         }
279 
280         raw_spin_lock_irqsave(&desc->lock, flags);
281         old_notify = desc->affinity_notify;
282         desc->affinity_notify = notify;
283         raw_spin_unlock_irqrestore(&desc->lock, flags);
284 
285         if (old_notify)
286                 kref_put(&old_notify->kref, old_notify->release);
287 
288         return 0;
289 }
290 EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
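
A client sketch, assuming a hypothetical device structure that embeds the
notify block. The caller fills in only .notify and .release, as documented
above, and must unregister (by passing NULL) before free_irq():

    #include <linux/interrupt.h>

    struct mydev {                                  /* hypothetical */
            int irq;
            struct irq_affinity_notify affinity_notify;
    };

    static void mydev_affinity_notify(struct irq_affinity_notify *notify,
                                      const cpumask_t *mask)
    {
            struct mydev *dev = container_of(notify, struct mydev,
                                             affinity_notify);

            mydev_retarget_queues(dev, mask);       /* hypothetical helper */
    }

    static void mydev_affinity_release(struct kref *ref)
    {
            /* the notify block is embedded in mydev: nothing to free */
    }

    static int mydev_setup_affinity_notify(struct mydev *dev)
    {
            dev->affinity_notify.notify = mydev_affinity_notify;
            dev->affinity_notify.release = mydev_affinity_release;
            return irq_set_affinity_notifier(dev->irq, &dev->affinity_notify);
    }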
291 
292 #ifndef CONFIG_AUTO_IRQ_AFFINITY
293 /*
294  * Generic version of the affinity autoselector.
295  */
296 static int
297 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
298 {
299         struct cpumask *set = irq_default_affinity;
300         int node = desc->irq_data.node;
301 
302         /* Excludes PER_CPU and NO_BALANCE interrupts */
303         if (!irq_can_set_affinity(irq))
304                 return 0;
305 
306         /*
307          * Preserve a userspace affinity setup, but make sure that
308          * one of the targets is online.
309          */
310         if (irqd_has_set(&desc->irq_data, IRQD_AFFINITY_SET)) {
311                 if (cpumask_intersects(desc->irq_data.affinity,
312                                        cpu_online_mask))
313                         set = desc->irq_data.affinity;
314                 else
315                         irqd_clear(&desc->irq_data, IRQD_AFFINITY_SET);
316         }
317 
318         cpumask_and(mask, cpu_online_mask, set);
319         if (node != NUMA_NO_NODE) {
320                 const struct cpumask *nodemask = cpumask_of_node(node);
321 
322                 /* make sure at least one of the cpus in nodemask is online */
323                 if (cpumask_intersects(mask, nodemask))
324                         cpumask_and(mask, mask, nodemask);
325         }
326         irq_do_set_affinity(&desc->irq_data, mask, false);
327         return 0;
328 }
329 #else
330 static inline int
331 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask)
332 {
333         return irq_select_affinity(irq);
334 }
335 #endif
336 
337 /*
338  * Called when affinity is set via /proc/irq
339  */
340 int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask)
341 {
342         struct irq_desc *desc = irq_to_desc(irq);
343         unsigned long flags;
344         int ret;
345 
346         raw_spin_lock_irqsave(&desc->lock, flags);
347         ret = setup_affinity(irq, desc, mask);
348         raw_spin_unlock_irqrestore(&desc->lock, flags);
349         return ret;
350 }
351 
352 #else
353 static inline int
354 setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
355 {
356         return 0;
357 }
358 #endif
359 
360 void __disable_irq(struct irq_desc *desc, unsigned int irq, bool suspend)
361 {
362         if (suspend) {
363                 if (!desc->action || (desc->action->flags & IRQF_NO_SUSPEND))
364                         return;
365                 desc->istate |= IRQS_SUSPENDED;
366         }
367 
368         if (!desc->depth++)
369                 irq_disable(desc);
370 }
371 
372 static int __disable_irq_nosync(unsigned int irq)
373 {
374         unsigned long flags;
375         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
376 
377         if (!desc)
378                 return -EINVAL;
379         __disable_irq(desc, irq, false);
380         irq_put_desc_busunlock(desc, flags);
381         return 0;
382 }
383 
384 /**
385  *      disable_irq_nosync - disable an irq without waiting
386  *      @irq: Interrupt to disable
387  *
388  *      Disable the selected interrupt line.  Disables and Enables are
389  *      nested.
390  *      Unlike disable_irq(), this function does not ensure existing
391  *      instances of the IRQ handler have completed before returning.
392  *
393  *      This function may be called from IRQ context.
394  */
395 void disable_irq_nosync(unsigned int irq)
396 {
397         __disable_irq_nosync(irq);
398 }
399 EXPORT_SYMBOL(disable_irq_nosync);
400 
401 /**
402  *      disable_irq - disable an irq and wait for completion
403  *      @irq: Interrupt to disable
404  *
405  *      Disable the selected interrupt line.  Enables and Disables are
406  *      nested.
407  *      This function waits for any pending IRQ handlers for this interrupt
408  *      to complete before returning. If you use this function while
409  *      holding a resource the IRQ handler may need you will deadlock.
410  *
411  *      This function may be called - with care - from IRQ context.
412  */
413 void disable_irq(unsigned int irq)
414 {
415         if (!__disable_irq_nosync(irq))
416                 synchronize_irq(irq);
417 }
418 EXPORT_SYMBOL(disable_irq);
419 
420 void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume)
421 {
422         if (resume) {
423                 if (!(desc->istate & IRQS_SUSPENDED)) {
424                         if (!desc->action)
425                                 return;
426                         if (!(desc->action->flags & IRQF_FORCE_RESUME))
427                                 return;
428                         /* Pretend that it got disabled! */
429                         desc->depth++;
430                 }
431                 desc->istate &= ~IRQS_SUSPENDED;
432         }
433 
434         switch (desc->depth) {
435         case 0:
436  err_out:
437                 WARN(1, "Unbalanced enable for IRQ %d\n", irq);
438                 break;
439         case 1: {
440                 if (desc->istate & IRQS_SUSPENDED)
441                         goto err_out;
442                 /* Prevent probing on this irq: */
443                 irq_settings_set_noprobe(desc);
444                 irq_enable(desc);
445                 check_irq_resend(desc, irq);
446                 /* fall-through */
447         }
448         default:
449                 desc->depth--;
450         }
451 }
452 
453 /**
454  *      enable_irq - enable handling of an irq
455  *      @irq: Interrupt to enable
456  *
457  *      Undoes the effect of one call to disable_irq().  If this
458  *      matches the last disable, processing of interrupts on this
459  *      IRQ line is re-enabled.
460  *
461  *      This function may be called from IRQ context only when
462  *      desc->irq_data.chip->bus_lock and desc->chip->bus_sync_unlock are NULL !
463  */
464 void enable_irq(unsigned int irq)
465 {
466         unsigned long flags;
467         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
468 
469         if (!desc)
470                 return;
471         if (WARN(!desc->irq_data.chip,
472                  "enable_irq before setup/request_irq: irq %u\n", irq))
473                 goto out;
474 
475         __enable_irq(desc, irq, false);
476 out:
477         irq_put_desc_busunlock(desc, flags);
478 }
479 EXPORT_SYMBOL(enable_irq);
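
Disables and enables nest, so the line only comes back on when the last
disable has been undone. A sketch with a hypothetical dev->irq:

    disable_irq(dev->irq);          /* depth 0 -> 1, waits for handlers */
    disable_irq_nosync(dev->irq);   /* depth 1 -> 2, returns immediately */

    enable_irq(dev->irq);           /* depth 2 -> 1, line still disabled */
    enable_irq(dev->irq);           /* depth 1 -> 0, line enabled again */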
480 
481 static int set_irq_wake_real(unsigned int irq, unsigned int on)
482 {
483         struct irq_desc *desc = irq_to_desc(irq);
484         int ret = -ENXIO;
485 
486         if (irq_desc_get_chip(desc)->flags & IRQCHIP_SKIP_SET_WAKE)
487                 return 0;
488 
489         if (desc->irq_data.chip->irq_set_wake)
490                 ret = desc->irq_data.chip->irq_set_wake(&desc->irq_data, on);
491 
492         return ret;
493 }
494 
495 /**
496  *      irq_set_irq_wake - control irq power management wakeup
497  *      @irq:   interrupt to control
498  *      @on:    enable/disable power management wakeup
499  *
500  *      Enable/disable power management wakeup mode, which is
501  *      disabled by default.  Enables and disables must match,
502  *      just as they match for non-wakeup mode support.
503  *
504  *      Wakeup mode lets this IRQ wake the system from sleep
505  *      states like "suspend to RAM".
506  */
507 int irq_set_irq_wake(unsigned int irq, unsigned int on)
508 {
509         unsigned long flags;
510         struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL);
511         int ret = 0;
512 
513         if (!desc)
514                 return -EINVAL;
515 
516         /* wakeup-capable irqs can be shared between drivers that
517          * don't need to have the same sleep mode behaviors.
518          */
519         if (on) {
520                 if (desc->wake_depth++ == 0) {
521                         ret = set_irq_wake_real(irq, on);
522                         if (ret)
523                                 desc->wake_depth = 0;
524                         else
525                                 irqd_set(&desc->irq_data, IRQD_WAKEUP_STATE);
526                 }
527         } else {
528                 if (desc->wake_depth == 0) {
529                         WARN(1, "Unbalanced IRQ %d wake disable\n", irq);
530                 } else if (--desc->wake_depth == 0) {
531                         ret = set_irq_wake_real(irq, on);
532                         if (ret)
533                                 desc->wake_depth = 1;
534                         else
535                                 irqd_clear(&desc->irq_data, IRQD_WAKEUP_STATE);
536                 }
537         }
538         irq_put_desc_busunlock(desc, flags);
539         return ret;
540 }
541 EXPORT_SYMBOL(irq_set_irq_wake);
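
The usual pairing keeps wake enables balanced across suspend and resume,
as required above. A sketch with hypothetical dev_pm_ops callbacks and
device structure:

    static int mydev_suspend(struct device *d)
    {
            struct mydev *dev = dev_get_drvdata(d);

            if (device_may_wakeup(d))
                    irq_set_irq_wake(dev->irq, 1);  /* wake_depth 0 -> 1 */
            return 0;
    }

    static int mydev_resume(struct device *d)
    {
            struct mydev *dev = dev_get_drvdata(d);

            if (device_may_wakeup(d))
                    irq_set_irq_wake(dev->irq, 0);  /* wake_depth 1 -> 0 */
            return 0;
    }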
542 
543 /*
544  * Internal function that tells the architecture code whether a
545  * particular irq has been exclusively allocated or is available
546  * for driver use.
547  */
548 int can_request_irq(unsigned int irq, unsigned long irqflags)
549 {
550         unsigned long flags;
551         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
552         int canrequest = 0;
553 
554         if (!desc)
555                 return 0;
556 
557         if (irq_settings_can_request(desc)) {
558                 if (!desc->action ||
559                     irqflags & desc->action->flags & IRQF_SHARED)
560                         canrequest = 1;
561         }
562         irq_put_desc_unlock(desc, flags);
563         return canrequest;
564 }
565 
566 int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
567                       unsigned long flags)
568 {
569         struct irq_chip *chip = desc->irq_data.chip;
570         int ret, unmask = 0;
571 
572         if (!chip || !chip->irq_set_type) {
573                 /*
574                  * IRQF_TRIGGER_* but the PIC does not support multiple
575                  * flow-types?
576                  */
577                 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
578                          chip ? (chip->name ? : "unknown") : "unknown");
579                 return 0;
580         }
581 
582         flags &= IRQ_TYPE_SENSE_MASK;
583 
584         if (chip->flags & IRQCHIP_SET_TYPE_MASKED) {
585                 if (!irqd_irq_masked(&desc->irq_data))
586                         mask_irq(desc);
587                 if (!irqd_irq_disabled(&desc->irq_data))
588                         unmask = 1;
589         }
590 
591         /* caller masked out all except trigger mode flags */
592         ret = chip->irq_set_type(&desc->irq_data, flags);
593 
594         switch (ret) {
595         case IRQ_SET_MASK_OK:
596                 irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK);
597                 irqd_set(&desc->irq_data, flags);
598                 /* fall through */
599         case IRQ_SET_MASK_OK_NOCOPY:
600                 flags = irqd_get_trigger_type(&desc->irq_data);
601                 irq_settings_set_trigger_mask(desc, flags);
602                 irqd_clear(&desc->irq_data, IRQD_LEVEL);
603                 irq_settings_clr_level(desc);
604                 if (flags & IRQ_TYPE_LEVEL_MASK) {
605                         irq_settings_set_level(desc);
606                         irqd_set(&desc->irq_data, IRQD_LEVEL);
607                 }
608 
609                 ret = 0;
610                 break;
611         default:
612                 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
613                        flags, irq, chip->irq_set_type);
614         }
615         if (unmask)
616                 unmask_irq(desc);
617         return ret;
618 }
619 
620 #ifdef CONFIG_HARDIRQS_SW_RESEND
621 int irq_set_parent(int irq, int parent_irq)
622 {
623         unsigned long flags;
624         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
625 
626         if (!desc)
627                 return -EINVAL;
628 
629         desc->parent_irq = parent_irq;
630 
631         irq_put_desc_unlock(desc, flags);
632         return 0;
633 }
634 #endif
635 
636 /*
637  * Default primary interrupt handler for threaded interrupts. Is
638  * assigned as primary handler when request_threaded_irq is called
639  * with handler == NULL. Useful for oneshot interrupts.
640  */
641 static irqreturn_t irq_default_primary_handler(int irq, void *dev_id)
642 {
643         return IRQ_WAKE_THREAD;
644 }
645 
646 /*
647  * Primary handler for nested threaded interrupts. Should never be
648  * called.
649  */
650 static irqreturn_t irq_nested_primary_handler(int irq, void *dev_id)
651 {
652         WARN(1, "Primary handler called for nested irq %d\n", irq);
653         return IRQ_NONE;
654 }
655 
656 static int irq_wait_for_interrupt(struct irqaction *action)
657 {
658         set_current_state(TASK_INTERRUPTIBLE);
659 
660         while (!kthread_should_stop()) {
661 
662                 if (test_and_clear_bit(IRQTF_RUNTHREAD,
663                                        &action->thread_flags)) {
664                         __set_current_state(TASK_RUNNING);
665                         return 0;
666                 }
667                 schedule();
668                 set_current_state(TASK_INTERRUPTIBLE);
669         }
670         __set_current_state(TASK_RUNNING);
671         return -1;
672 }
673 
674 /*
675  * Oneshot interrupts keep the irq line masked until the threaded
676  * handler has finished. Unmask the line if the interrupt has not
677  * been disabled and is marked MASKED.
678  */
679 static void irq_finalize_oneshot(struct irq_desc *desc,
680                                  struct irqaction *action)
681 {
682         if (!(desc->istate & IRQS_ONESHOT))
683                 return;
684 again:
685         chip_bus_lock(desc);
686         raw_spin_lock_irq(&desc->lock);
687 
688         /*
689          * Implausible though it may be, we need to protect ourselves
690          * against the following scenario:
691          *
692          * The thread finishes before the hard interrupt handler on the
693          * other CPU. If we unmasked the irq line now, the interrupt
694          * could come in again, mask the line and then leave it masked
695          * forever, as the handler bails out on IRQS_INPROGRESS.
696          *
697          * This also serializes the state of shared oneshot handlers
698          * versus "desc->threads_oneshot |= action->thread_mask;" in
699          * irq_wake_thread(). See the comment there which explains the
700          * serialization.
701          */
702         if (unlikely(irqd_irq_inprogress(&desc->irq_data))) {
703                 raw_spin_unlock_irq(&desc->lock);
704                 chip_bus_sync_unlock(desc);
705                 cpu_relax();
706                 goto again;
707         }
708 
709         /*
710          * Now check again whether the thread should run. Otherwise
711          * we would clear the threads_oneshot bit of this thread which
712          * was just set.
713          */
714         if (test_bit(IRQTF_RUNTHREAD, &action->thread_flags))
715                 goto out_unlock;
716 
717         desc->threads_oneshot &= ~action->thread_mask;
718 
719         if (!desc->threads_oneshot && !irqd_irq_disabled(&desc->irq_data) &&
720             irqd_irq_masked(&desc->irq_data))
721                 unmask_irq(desc);
722 
723 out_unlock:
724         raw_spin_unlock_irq(&desc->lock);
725         chip_bus_sync_unlock(desc);
726 }
727 
728 #ifdef CONFIG_SMP
729 /*
730  * Check whether we need to change the affinity of the interrupt thread.
731  */
732 static void
733 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action)
734 {
735         cpumask_var_t mask;
736         bool valid = true;
737 
738         if (!test_and_clear_bit(IRQTF_AFFINITY, &action->thread_flags))
739                 return;
740 
741         /*
742          * In case we are out of memory we set IRQTF_AFFINITY again and
743          * try again next time
744          */
745         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
746                 set_bit(IRQTF_AFFINITY, &action->thread_flags);
747                 return;
748         }
749 
750         raw_spin_lock_irq(&desc->lock);
751         /*
752          * This code is triggered unconditionally. Check the affinity
753          * mask pointer. For CPU_MASK_OFFSTACK=n this is optimized out.
754          */
755         if (desc->irq_data.affinity)
756                 cpumask_copy(mask, desc->irq_data.affinity);
757         else
758                 valid = false;
759         raw_spin_unlock_irq(&desc->lock);
760 
761         if (valid)
762                 set_cpus_allowed_ptr(current, mask);
763         free_cpumask_var(mask);
764 }
765 #else
766 static inline void
767 irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
768 #endif
769 
770 /*
771  * Interrupts which are not explicitly requested as threaded
772  * interrupts rely on the implicit bh/preempt disable of the hard irq
773  * context. So we need to disable bh here to avoid deadlocks and other
774  * side effects.
775  */
776 static irqreturn_t
777 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
778 {
779         irqreturn_t ret;
780 
781         local_bh_disable();
782         ret = action->thread_fn(action->irq, action->dev_id);
783         irq_finalize_oneshot(desc, action);
784         local_bh_enable();
785         return ret;
786 }
787 
788 /*
789  * Interrupts explicitly requested as threaded interrupts want to be
790  * preemptible - many of them need to sleep and wait for slow buses to
791  * complete.
792  */
793 static irqreturn_t irq_thread_fn(struct irq_desc *desc,
794                 struct irqaction *action)
795 {
796         irqreturn_t ret;
797 
798         ret = action->thread_fn(action->irq, action->dev_id);
799         irq_finalize_oneshot(desc, action);
800         return ret;
801 }
802 
803 static void wake_threads_waitq(struct irq_desc *desc)
804 {
805         if (atomic_dec_and_test(&desc->threads_active))
806                 wake_up(&desc->wait_for_threads);
807 }
808 
809 static void irq_thread_dtor(struct callback_head *unused)
810 {
811         struct task_struct *tsk = current;
812         struct irq_desc *desc;
813         struct irqaction *action;
814 
815         if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
816                 return;
817 
818         action = kthread_data(tsk);
819 
820         pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
821                tsk->comm, tsk->pid, action->irq);
822 
823 
824         desc = irq_to_desc(action->irq);
825         /*
826          * If IRQTF_RUNTHREAD is set, we need to decrement
827          * desc->threads_active and wake possible waiters.
828          */
829         if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
830                 wake_threads_waitq(desc);
831 
832         /* Prevent a stale desc->threads_oneshot */
833         irq_finalize_oneshot(desc, action);
834 }
835 
836 /*
837  * Interrupt handler thread
838  */
839 static int irq_thread(void *data)
840 {
841         struct callback_head on_exit_work;
842         struct irqaction *action = data;
843         struct irq_desc *desc = irq_to_desc(action->irq);
844         irqreturn_t (*handler_fn)(struct irq_desc *desc,
845                         struct irqaction *action);
846 
847         if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
848                                         &action->thread_flags))
849                 handler_fn = irq_forced_thread_fn;
850         else
851                 handler_fn = irq_thread_fn;
852 
853         init_task_work(&on_exit_work, irq_thread_dtor);
854         task_work_add(current, &on_exit_work, false);
855 
856         irq_thread_check_affinity(desc, action);
857 
858         while (!irq_wait_for_interrupt(action)) {
859                 irqreturn_t action_ret;
860 
861                 irq_thread_check_affinity(desc, action);
862 
863                 action_ret = handler_fn(desc, action);
864                 if (!noirqdebug)
865                         note_interrupt(action->irq, desc, action_ret);
866 
867                 wake_threads_waitq(desc);
868         }
869 
870         /*
871          * This is the regular exit path. __free_irq() is stopping the
872          * thread via kthread_stop() after calling
873          * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
874          * oneshot mask bit can be set. We cannot verify that as we
875          * cannot touch the oneshot mask at this point anymore as
876          * __setup_irq() might have given out current's thread_mask
877          * again.
878          */
879         task_work_cancel(current, irq_thread_dtor);
880         return 0;
881 }
882 
883 static void irq_setup_forced_threading(struct irqaction *new)
884 {
885         if (!force_irqthreads)
886                 return;
887         if (new->flags & (IRQF_NO_THREAD | IRQF_PERCPU | IRQF_ONESHOT))
888                 return;
889 
890         new->flags |= IRQF_ONESHOT;
891 
892         if (!new->thread_fn) {
893                 set_bit(IRQTF_FORCED_THREAD, &new->thread_flags);
894                 new->thread_fn = new->handler;
895                 new->handler = irq_default_primary_handler;
896         }
897 }
898 
899 /*
900  * Internal function to register an irqaction - typically used to
901  * allocate special interrupts that are part of the architecture.
902  */
903 static int
904 __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
905 {
906         struct irqaction *old, **old_ptr;
907         unsigned long flags, thread_mask = 0;
908         int ret, nested, shared = 0;
909         cpumask_var_t mask;
910 
911         if (!desc)
912                 return -EINVAL;
913 
914         if (desc->irq_data.chip == &no_irq_chip)
915                 return -ENOSYS;
916         if (!try_module_get(desc->owner))
917                 return -ENODEV;
918 
919         /*
920          * Check whether the interrupt nests into another interrupt
921          * thread.
922          */
923         nested = irq_settings_is_nested_thread(desc);
924         if (nested) {
925                 if (!new->thread_fn) {
926                         ret = -EINVAL;
927                         goto out_mput;
928                 }
929                 /*
930                  * Replace the primary handler which was provided from
931                  * the driver for non nested interrupt handling by the
932                  * dummy function which warns when called.
933                  */
934                 new->handler = irq_nested_primary_handler;
935         } else {
936                 if (irq_settings_can_thread(desc))
937                         irq_setup_forced_threading(new);
938         }
939 
940         /*
941          * Create a handler thread when a thread function is supplied
942          * and the interrupt does not nest into another interrupt
943          * thread.
944          */
945         if (new->thread_fn && !nested) {
946                 struct task_struct *t;
947                 static const struct sched_param param = {
948                         .sched_priority = MAX_USER_RT_PRIO/2,
949                 };
950 
951                 t = kthread_create(irq_thread, new, "irq/%d-%s", irq,
952                                    new->name);
953                 if (IS_ERR(t)) {
954                         ret = PTR_ERR(t);
955                         goto out_mput;
956                 }
957 
958                 sched_setscheduler_nocheck(t, SCHED_FIFO, &param);
959 
960                 /*
961                  * We keep the reference to the task struct even if
962                  * the thread dies to avoid that the interrupt code
963                  * references an already freed task_struct.
964                  */
965                 get_task_struct(t);
966                 new->thread = t;
967                 /*
968                  * Tell the thread to set its affinity. This is
969                  * important for shared interrupt handlers as we do
970                  * not invoke setup_affinity() for the secondary
971                  * handlers as everything is already set up. Even for
972                  * interrupts marked with IRQF_NO_BALANCE this is
973                  * correct as we want the thread to move to the cpu(s)
974                  * on which the requesting code placed the interrupt.
975                  */
976                 set_bit(IRQTF_AFFINITY, &new->thread_flags);
977         }
978 
979         if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
980                 ret = -ENOMEM;
981                 goto out_thread;
982         }
983 
984         /*
985          * Drivers are often written to work w/o knowledge about the
986          * underlying irq chip implementation, so a request for a
987          * threaded irq without a primary hard irq context handler
988          * requires the ONESHOT flag to be set. Some irq chips like
989          * MSI based interrupts are per se one shot safe. Check the
990          * chip flags, so we can avoid the unmask dance at the end of
991          * the threaded handler for those.
992          */
993         if (desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)
994                 new->flags &= ~IRQF_ONESHOT;
995 
996         /*
997          * The following block of code has to be executed atomically
998          */
999         raw_spin_lock_irqsave(&desc->lock, flags);
1000         old_ptr = &desc->action;
1001         old = *old_ptr;
1002         if (old) {
1003                 /*
1004                  * Can't share interrupts unless both agree to and are
1005                  * the same type (level, edge, polarity). So both flag
1006                  * fields must have IRQF_SHARED set and the bits which
1007                  * set the trigger type must match. Also all must
1008                  * agree on ONESHOT.
1009                  */
1010                 if (!((old->flags & new->flags) & IRQF_SHARED) ||
1011                     ((old->flags ^ new->flags) & IRQF_TRIGGER_MASK) ||
1012                     ((old->flags ^ new->flags) & IRQF_ONESHOT))
1013                         goto mismatch;
1014 
1015                 /* All handlers must agree on per-cpuness */
1016                 if ((old->flags & IRQF_PERCPU) !=
1017                     (new->flags & IRQF_PERCPU))
1018                         goto mismatch;
1019 
1020                 /* add new interrupt at end of irq queue */
1021                 do {
1022                         /*
1023                          * Or all existing action->thread_mask bits,
1024                          * so we can find the next zero bit for this
1025                          * new action.
1026                          */
1027                         thread_mask |= old->thread_mask;
1028                         old_ptr = &old->next;
1029                         old = *old_ptr;
1030                 } while (old);
1031                 shared = 1;
1032         }
1033 
1034         /*
1035          * Setup the thread mask for this irqaction for ONESHOT. For
1036          * !ONESHOT irqs the thread mask is 0 so we can avoid a
1037          * conditional in irq_wake_thread().
1038          */
1039         if (new->flags & IRQF_ONESHOT) {
1040                 /*
1041                  * Unlikely to have 32 (or 64 on 64-bit) irqs sharing
1042                  * one line, but who knows.
1043                  */
1044                 if (thread_mask == ~0UL) {
1045                         ret = -EBUSY;
1046                         goto out_mask;
1047                 }
1048                 /*
1049                  * The thread_mask for the action is or'ed to
1050                  * desc->thread_active to indicate that the
1051                  * IRQF_ONESHOT thread handler has been woken, but not
1052                  * yet finished. The bit is cleared when a thread
1053                  * completes. When all threads of a shared interrupt
1054                  * line have completed desc->threads_active becomes
1055                  * zero and the interrupt line is unmasked. See
1056                  * handle.c:irq_wake_thread() for further information.
1057                  *
1058                  * If no thread is woken by primary (hard irq context)
1059                  * interrupt handlers, then desc->threads_active is
1060                  * also checked for zero to unmask the irq line in the
1061                  * affected hard irq flow handlers
1062                  * (handle_[fasteoi|level]_irq).
1063                  *
1064                  * The new action gets the first zero bit of
1065                  * thread_mask assigned. See the loop above which or's
1066                  * all existing action->thread_mask bits.
1067                  */
1068                 new->thread_mask = 1UL << ffz(thread_mask);
1069 
1070         } else if (new->handler == irq_default_primary_handler &&
1071                    !(desc->irq_data.chip->flags & IRQCHIP_ONESHOT_SAFE)) {
1072                 /*
1073                  * The interrupt was requested with handler = NULL, so
1074                  * we use the default primary handler for it. But it
1075                  * does not have the oneshot flag set. In combination
1076                  * with level interrupts this is deadly, because the
1077                  * default primary handler just wakes the thread, then
1078                  * the irq line is reenabled, but the device still
1079                  * has the level irq asserted. Rinse and repeat....
1080                  *
1081                  * While this works for edge type interrupts, we play
1082                  * it safe and reject unconditionally because we can't
1083                  * say for sure which type this interrupt really
1084                  * has. The type flags are unreliable as the
1085                  * underlying chip implementation can override them.
1086                  */
1087                 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1088                        irq);
1089                 ret = -EINVAL;
1090                 goto out_mask;
1091         }
1092 
1093         if (!shared) {
1094                 init_waitqueue_head(&desc->wait_for_threads);
1095 
1096                 /* Setup the type (level, edge, polarity) if configured: */
1097                 if (new->flags & IRQF_TRIGGER_MASK) {
1098                         ret = __irq_set_trigger(desc, irq,
1099                                         new->flags & IRQF_TRIGGER_MASK);
1100 
1101                         if (ret)
1102                                 goto out_mask;
1103                 }
1104 
1105                 desc->istate &= ~(IRQS_AUTODETECT | IRQS_SPURIOUS_DISABLED | \
1106                                   IRQS_ONESHOT | IRQS_WAITING);
1107                 irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
1108 
1109                 if (new->flags & IRQF_PERCPU) {
1110                         irqd_set(&desc->irq_data, IRQD_PER_CPU);
1111                         irq_settings_set_per_cpu(desc);
1112                 }
1113 
1114                 if (new->flags & IRQF_ONESHOT)
1115                         desc->istate |= IRQS_ONESHOT;
1116 
1117                 if (irq_settings_can_autoenable(desc))
1118                         irq_startup(desc, true);
1119                 else
1120                         /* Undo nested disables: */
1121                         desc->depth = 1;
1122 
1123                 /* Exclude IRQ from balancing if requested */
1124                 if (new->flags & IRQF_NOBALANCING) {
1125                         irq_settings_set_no_balancing(desc);
1126                         irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
1127                 }
1128 
1129                 /* Set default affinity mask once everything is setup */
1130                 setup_affinity(irq, desc, mask);
1131 
1132         } else if (new->flags & IRQF_TRIGGER_MASK) {
1133                 unsigned int nmsk = new->flags & IRQF_TRIGGER_MASK;
1134                 unsigned int omsk = irq_settings_get_trigger_mask(desc);
1135 
1136                 if (nmsk != omsk)
1137                         /* hope the handler works with the current trigger mode */
1138                         pr_warning("irq %d uses trigger mode %u; requested %u\n",
1139                                    irq, omsk, nmsk);
1140         }
1141 
1142         new->irq = irq;
1143         *old_ptr = new;
1144 
1145         /* Reset broken irq detection when installing new handler */
1146         desc->irq_count = 0;
1147         desc->irqs_unhandled = 0;
1148 
1149         /*
1150          * Check whether we disabled the irq via the spurious handler
1151          * before. Reenable it and give it another chance.
1152          */
1153         if (shared && (desc->istate & IRQS_SPURIOUS_DISABLED)) {
1154                 desc->istate &= ~IRQS_SPURIOUS_DISABLED;
1155                 __enable_irq(desc, irq, false);
1156         }
1157 
1158         raw_spin_unlock_irqrestore(&desc->lock, flags);
1159 
1160         /*
1161          * Strictly no need to wake it up, but hung_task complains
1162          * when no hard interrupt wakes the thread up.
1163          */
1164         if (new->thread)
1165                 wake_up_process(new->thread);
1166 
1167         register_irq_proc(irq, desc);
1168         new->dir = NULL;
1169         register_handler_proc(irq, new);
1170         free_cpumask_var(mask);
1171 
1172         return 0;
1173 
1174 mismatch:
1175         if (!(new->flags & IRQF_PROBE_SHARED)) {
1176                 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1177                        irq, new->flags, new->name, old->flags, old->name);
1178 #ifdef CONFIG_DEBUG_SHIRQ
1179                 dump_stack();
1180 #endif
1181         }
1182         ret = -EBUSY;
1183 
1184 out_mask:
1185         raw_spin_unlock_irqrestore(&desc->lock, flags);
1186         free_cpumask_var(mask);
1187 
1188 out_thread:
1189         if (new->thread) {
1190                 struct task_struct *t = new->thread;
1191 
1192                 new->thread = NULL;
1193                 kthread_stop(t);
1194                 put_task_struct(t);
1195         }
1196 out_mput:
1197         module_put(desc->owner);
1198         return ret;
1199 }
1200 
1201 /**
1202  *      setup_irq - setup an interrupt
1203  *      @irq: Interrupt line to setup
1204  *      @act: irqaction for the interrupt
1205  *
1206  * Used to statically setup interrupts in the early boot process.
1207  */
1208 int setup_irq(unsigned int irq, struct irqaction *act)
1209 {
1210         int retval;
1211         struct irq_desc *desc = irq_to_desc(irq);
1212 
1213         if (WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1214                 return -EINVAL;
1215         chip_bus_lock(desc);
1216         retval = __setup_irq(irq, desc, act);
1217         chip_bus_sync_unlock(desc);
1218 
1219         return retval;
1220 }
1221 EXPORT_SYMBOL_GPL(setup_irq);
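
The typical caller is architecture timer or cascade setup code using a
statically allocated irqaction. A sketch (the irq number, names and
flags are chosen for illustration only):

    static irqreturn_t my_timer_interrupt(int irq, void *dev_id)
    {
            /* ack the hardware, advance the clock event device, ... */
            return IRQ_HANDLED;
    }

    static struct irqaction my_timer_irq = {
            .handler = my_timer_interrupt,
            .flags   = IRQF_TIMER | IRQF_IRQPOLL,
            .name    = "timer",
    };

    void __init my_time_init(void)
    {
            setup_irq(0, &my_timer_irq);    /* irq 0 is hypothetical */
    }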
1222 
1223 /*
1224  * Internal function to unregister an irqaction - used to free
1225  * regular and special interrupts that are part of the architecture.
1226  */
1227 static struct irqaction *__free_irq(unsigned int irq, void *dev_id)
1228 {
1229         struct irq_desc *desc = irq_to_desc(irq);
1230         struct irqaction *action, **action_ptr;
1231         unsigned long flags;
1232 
1233         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1234 
1235         if (!desc)
1236                 return NULL;
1237 
1238         raw_spin_lock_irqsave(&desc->lock, flags);
1239 
1240         /*
1241          * There can be multiple actions per IRQ descriptor, find the right
1242          * one based on the dev_id:
1243          */
1244         action_ptr = &desc->action;
1245         for (;;) {
1246                 action = *action_ptr;
1247 
1248                 if (!action) {
1249                         WARN(1, "Trying to free already-free IRQ %d\n", irq);
1250                         raw_spin_unlock_irqrestore(&desc->lock, flags);
1251 
1252                         return NULL;
1253                 }
1254 
1255                 if (action->dev_id == dev_id)
1256                         break;
1257                 action_ptr = &action->next;
1258         }
1259 
1260         /* Found it - now remove it from the list of entries: */
1261         *action_ptr = action->next;
1262 
1263         /* If this was the last handler, shut down the IRQ line: */
1264         if (!desc->action)
1265                 irq_shutdown(desc);
1266 
1267 #ifdef CONFIG_SMP
1268         /* make sure affinity_hint is cleaned up */
1269         if (WARN_ON_ONCE(desc->affinity_hint))
1270                 desc->affinity_hint = NULL;
1271 #endif
1272 
1273         raw_spin_unlock_irqrestore(&desc->lock, flags);
1274 
1275         unregister_handler_proc(irq, action);
1276 
1277         /* Make sure it's not being used on another CPU: */
1278         synchronize_irq(irq);
1279 
1280 #ifdef CONFIG_DEBUG_SHIRQ
1281         /*
1282          * It's a shared IRQ -- the driver ought to be prepared for an IRQ
1283          * event to happen even now that it's being freed, so let's make
1284          * sure that is so by doing an extra call to the handler....
1285          *
1286          * ( We do this after actually deregistering it, to make sure that a
1287          *   'real' IRQ doesn't run in parallel with our fake. )
1288          */
1289         if (action->flags & IRQF_SHARED) {
1290                 local_irq_save(flags);
1291                 action->handler(irq, dev_id);
1292                 local_irq_restore(flags);
1293         }
1294 #endif
1295 
1296         if (action->thread) {
1297                 kthread_stop(action->thread);
1298                 put_task_struct(action->thread);
1299         }
1300 
1301         module_put(desc->owner);
1302         return action;
1303 }
1304 
1305 /**
1306  *      remove_irq - free an interrupt
1307  *      @irq: Interrupt line to free
1308  *      @act: irqaction for the interrupt
1309  *
1310  * Used to remove interrupts statically setup by the early boot process.
1311  */
1312 void remove_irq(unsigned int irq, struct irqaction *act)
1313 {
1314         struct irq_desc *desc = irq_to_desc(irq);
1315 
1316         if (desc && !WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1317             __free_irq(irq, act->dev_id);
1318 }
1319 EXPORT_SYMBOL_GPL(remove_irq);
1320 
1321 /**
1322  *      free_irq - free an interrupt allocated with request_irq
1323  *      @irq: Interrupt line to free
1324  *      @dev_id: Device identity to free
1325  *
1326  *      Remove an interrupt handler. The handler is removed and if the
1327  *      interrupt line is no longer in use by any driver it is disabled.
1328  *      On a shared IRQ the caller must ensure the interrupt is disabled
1329  *      on the card it drives before calling this function. The function
1330  *      does not return until any executing interrupts for this IRQ
1331  *      have completed.
1332  *
1333  *      This function must not be called from interrupt context.
1334  */
1335 void free_irq(unsigned int irq, void *dev_id)
1336 {
1337         struct irq_desc *desc = irq_to_desc(irq);
1338 
1339         if (!desc || WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1340                 return;
1341 
1342 #ifdef CONFIG_SMP
1343         if (WARN_ON(desc->affinity_notify))
1344                 desc->affinity_notify = NULL;
1345 #endif
1346 
1347         chip_bus_lock(desc);
1348         kfree(__free_irq(irq, dev_id));
1349         chip_bus_sync_unlock(desc);
1350 }
1351 EXPORT_SYMBOL(free_irq);
1352 
1353 /**
1354  *      request_threaded_irq - allocate an interrupt line
1355  *      @irq: Interrupt line to allocate
1356  *      @handler: Function to be called when the IRQ occurs.
1357  *                Primary handler for threaded interrupts
1358  *                If NULL and thread_fn != NULL the default
1359  *                primary handler is installed
1360  *      @thread_fn: Function called from the irq handler thread
1361  *                  If NULL, no irq thread is created
1362  *      @irqflags: Interrupt type flags
1363  *      @devname: An ascii name for the claiming device
1364  *      @dev_id: A cookie passed back to the handler function
1365  *
1366  *      This call allocates interrupt resources and enables the
1367  *      interrupt line and IRQ handling. From the point this
1368  *      call is made your handler function may be invoked. Since
1369  *      your handler function must clear any interrupt the board
1370  *      raises, you must take care both to initialise your hardware
1371  *      and to set up the interrupt handler in the right order.
1372  *
1373  *      If you want to set up a threaded irq handler for your device
1374  *      then you need to supply @handler and @thread_fn. @handler is
1375  *      still called in hard interrupt context and has to check
1376  *      whether the interrupt originates from the device. If yes it
1377  *      needs to disable the interrupt on the device and return
1378  *      IRQ_WAKE_THREAD which will wake up the handler thread and run
1379  *      @thread_fn. This split handler design is necessary to support
1380  *      shared interrupts.
1381  *
1382  *      Dev_id must be globally unique. Normally the address of the
1383  *      device data structure is used as the cookie. Since the handler
1384  *      receives this value it makes sense to use it.
1385  *
1386  *      If your interrupt is shared you must pass a non NULL dev_id
1387  *      as this is required when freeing the interrupt.
1388  *
1389  *      Flags:
1390  *
1391  *      IRQF_SHARED             Interrupt is shared
1392  *      IRQF_TRIGGER_*          Specify active edge(s) or level
1393  *
1394  */
1395 int request_threaded_irq(unsigned int irq, irq_handler_t handler,
1396                          irq_handler_t thread_fn, unsigned long irqflags,
1397                          const char *devname, void *dev_id)
1398 {
1399         struct irqaction *action;
1400         struct irq_desc *desc;
1401         int retval;
1402 
1403         /*
1404          * Sanity-check: shared interrupts must pass in a real dev-ID,
1405          * otherwise we'll have trouble later trying to figure out
1406          * which interrupt is which (messes up the interrupt freeing
1407          * logic etc).
1408          */
1409         if ((irqflags & IRQF_SHARED) && !dev_id)
1410                 return -EINVAL;
1411 
1412         desc = irq_to_desc(irq);
1413         if (!desc)
1414                 return -EINVAL;
1415 
1416         if (!irq_settings_can_request(desc) ||
1417             WARN_ON(irq_settings_is_per_cpu_devid(desc)))
1418                 return -EINVAL;
1419 
1420         if (!handler) {
1421                 if (!thread_fn)
1422                         return -EINVAL;
1423                 handler = irq_default_primary_handler;
1424         }
1425 
1426         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1427         if (!action)
1428                 return -ENOMEM;
1429 
1430         action->handler = handler;
1431         action->thread_fn = thread_fn;
1432         action->flags = irqflags;
1433         action->name = devname;
1434         action->dev_id = dev_id;
1435 
1436         chip_bus_lock(desc);
1437         retval = __setup_irq(irq, desc, action);
1438         chip_bus_sync_unlock(desc);
1439 
1440         if (retval)
1441                 kfree(action);
1442 
1443 #ifdef CONFIG_DEBUG_SHIRQ_FIXME
1444         if (!retval && (irqflags & IRQF_SHARED)) {
1445                 /*
1446                  * It's a shared IRQ -- the driver ought to be prepared for it
1447                  * to happen immediately, so let's make sure....
1448                  * We disable the irq to make sure that a 'real' IRQ doesn't
1449                  * run in parallel with our fake.
1450                  */
1451                 unsigned long flags;
1452 
1453                 disable_irq(irq);
1454                 local_irq_save(flags);
1455 
1456                 handler(irq, dev_id);
1457 
1458                 local_irq_restore(flags);
1459                 enable_irq(irq);
1460         }
1461 #endif
1462         return retval;
1463 }
1464 EXPORT_SYMBOL(request_threaded_irq);
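
A sketch of the split-handler pattern described above, for a hypothetical
device on a shared line: the primary handler runs in hard irq context and
only checks and quiesces the source, while the threaded handler does the
slow, sleepable work.

    static irqreturn_t mydev_hardirq(int irq, void *dev_id)
    {
            struct mydev *dev = dev_id;

            if (!mydev_irq_pending(dev))    /* hypothetical status check */
                    return IRQ_NONE;        /* not ours: the line is shared */
            mydev_mask_device_irq(dev);     /* quiet the device itself */
            return IRQ_WAKE_THREAD;         /* have mydev_thread_fn run */
    }

    static irqreturn_t mydev_thread_fn(int irq, void *dev_id)
    {
            struct mydev *dev = dev_id;

            mydev_process_events(dev);      /* may sleep, e.g. slow bus I/O */
            mydev_unmask_device_irq(dev);
            return IRQ_HANDLED;
    }

    err = request_threaded_irq(dev->irq, mydev_hardirq, mydev_thread_fn,
                               IRQF_SHARED, "mydev", dev);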
1465 
1466 /**
1467  *      request_any_context_irq - allocate an interrupt line
1468  *      @irq: Interrupt line to allocate
1469  *      @handler: Function to be called when the IRQ occurs.
1470  *                Threaded handler for threaded interrupts.
1471  *      @flags: Interrupt type flags
1472  *      @name: An ascii name for the claiming device
1473  *      @dev_id: A cookie passed back to the handler function
1474  *
1475  *      This call allocates interrupt resources and enables the
1476  *      interrupt line and IRQ handling. It selects either a
1477  *      hardirq or threaded handling method depending on the
1478  *      context.
1479  *
1480  *      On failure, it returns a negative value. On success,
1481  *      it returns either IRQC_IS_HARDIRQ or IRQC_IS_NESTED.
1482  */
1483 int request_any_context_irq(unsigned int irq, irq_handler_t handler,
1484                             unsigned long flags, const char *name, void *dev_id)
1485 {
1486         struct irq_desc *desc = irq_to_desc(irq);
1487         int ret;
1488 
1489         if (!desc)
1490                 return -EINVAL;
1491 
1492         if (irq_settings_is_nested_thread(desc)) {
1493                 ret = request_threaded_irq(irq, NULL, handler,
1494                                            flags, name, dev_id);
1495                 return !ret ? IRQC_IS_NESTED : ret;
1496         }
1497 
1498         ret = request_irq(irq, handler, flags, name, dev_id);
1499         return !ret ? IRQC_IS_HARDIRQ : ret;
1500 }
1501 EXPORT_SYMBOL_GPL(request_any_context_irq);
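
A caller that may sit behind either a real irq chip or a nested parent
(a gpio expander, say) uses the positive return value to learn which
variant was installed. Names are hypothetical:

    ret = request_any_context_irq(dev->irq, mydev_isr, 0, "mydev", dev);
    if (ret < 0)
            return ret;
    dev->has_hardirq = (ret == IRQC_IS_HARDIRQ);    /* else IRQC_IS_NESTED */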
1502 
1503 void enable_percpu_irq(unsigned int irq, unsigned int type)
1504 {
1505         unsigned int cpu = smp_processor_id();
1506         unsigned long flags;
1507         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1508 
1509         if (!desc)
1510                 return;
1511 
1512         type &= IRQ_TYPE_SENSE_MASK;
1513         if (type != IRQ_TYPE_NONE) {
1514                 int ret;
1515 
1516                 ret = __irq_set_trigger(desc, irq, type);
1517 
1518                 if (ret) {
1519                         WARN(1, "failed to set type for IRQ%d\n", irq);
1520                         goto out;
1521                 }
1522         }
1523 
1524         irq_percpu_enable(desc, cpu);
1525 out:
1526         irq_put_desc_unlock(desc, flags);
1527 }
1528 EXPORT_SYMBOL_GPL(enable_percpu_irq);
1529 
1530 void disable_percpu_irq(unsigned int irq)
1531 {
1532         unsigned int cpu = smp_processor_id();
1533         unsigned long flags;
1534         struct irq_desc *desc = irq_get_desc_lock(irq, &flags, IRQ_GET_DESC_CHECK_PERCPU);
1535 
1536         if (!desc)
1537                 return;
1538 
1539         irq_percpu_disable(desc, cpu);
1540         irq_put_desc_unlock(desc, flags);
1541 }
1542 EXPORT_SYMBOL_GPL(disable_percpu_irq);
1543 
1544 /*
1545  * Internal function to unregister a percpu irqaction.
1546  */
1547 static struct irqaction *__free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1548 {
1549         struct irq_desc *desc = irq_to_desc(irq);
1550         struct irqaction *action;
1551         unsigned long flags;
1552 
1553         WARN(in_interrupt(), "Trying to free IRQ %d from IRQ context!\n", irq);
1554 
1555         if (!desc)
1556                 return NULL;
1557 
1558         raw_spin_lock_irqsave(&desc->lock, flags);
1559 
1560         action = desc->action;
1561         if (!action || action->percpu_dev_id != dev_id) {
1562                 WARN(1, "Trying to free already-free IRQ %d\n", irq);
1563                 goto bad;
1564         }
1565 
1566         if (!cpumask_empty(desc->percpu_enabled)) {
1567                 WARN(1, "percpu IRQ %d still enabled on CPU%d!\n",
1568                      irq, cpumask_first(desc->percpu_enabled));
1569                 goto bad;
1570         }
1571 
1572         /* Found it - now remove it from the list of entries: */
1573         desc->action = NULL;
1574 
1575         raw_spin_unlock_irqrestore(&desc->lock, flags);
1576 
1577         unregister_handler_proc(irq, action);
1578 
1579         module_put(desc->owner);
1580         return action;
1581 
1582 bad:
1583         raw_spin_unlock_irqrestore(&desc->lock, flags);
1584         return NULL;
1585 }
1586 
1587 /**
1588  *      remove_percpu_irq - free a per-cpu interrupt
1589  *      @irq: Interrupt line to free
1590  *      @act: irqaction for the interrupt
1591  *
1592  * Used to remove interrupts statically setup by the early boot process.
1593  */
1594 void remove_percpu_irq(unsigned int irq, struct irqaction *act)
1595 {
1596         struct irq_desc *desc = irq_to_desc(irq);
1597 
1598         if (desc && irq_settings_is_per_cpu_devid(desc))
1599             __free_percpu_irq(irq, act->percpu_dev_id);
1600 }
1601 
1602 /**
1603  *      free_percpu_irq - free an interrupt allocated with request_percpu_irq
1604  *      @irq: Interrupt line to free
1605  *      @dev_id: Device identity to free
1606  *
1607  *      Remove a percpu interrupt handler. The handler is removed, but
1608  *      the interrupt line is not disabled. This must be done on each
1609  *      CPU before calling this function. The function does not return
1610  *      until any executing interrupts for this IRQ have completed.
1611  *
1612  *      This function must not be called from interrupt context.
1613  */
1614 void free_percpu_irq(unsigned int irq, void __percpu *dev_id)
1615 {
1616         struct irq_desc *desc = irq_to_desc(irq);
1617 
1618         if (!desc || !irq_settings_is_per_cpu_devid(desc))
1619                 return;
1620 
1621         chip_bus_lock(desc);
1622         kfree(__free_percpu_irq(irq, dev_id));
1623         chip_bus_sync_unlock(desc);
1624 }
1625 
1626 /**
1627  *      setup_percpu_irq - setup a per-cpu interrupt
1628  *      @irq: Interrupt line to setup
1629  *      @act: irqaction for the interrupt
1630  *
1631  * Used to statically setup per-cpu interrupts in the early boot process.
1632  */
1633 int setup_percpu_irq(unsigned int irq, struct irqaction *act)
1634 {
1635         struct irq_desc *desc = irq_to_desc(irq);
1636         int retval;
1637 
1638         if (!desc || !irq_settings_is_per_cpu_devid(desc))
1639                 return -EINVAL;
1640         chip_bus_lock(desc);
1641         retval = __setup_irq(irq, desc, act);
1642         chip_bus_sync_unlock(desc);
1643 
1644         return retval;
1645 }
1646 
1647 /**
1648  *      request_percpu_irq - allocate a percpu interrupt line
1649  *      @irq: Interrupt line to allocate
1650  *      @handler: Function to be called when the IRQ occurs.
1651  *      @devname: An ascii name for the claiming device
1652  *      @dev_id: A percpu cookie passed back to the handler function
1653  *
1654  *      This call allocates interrupt resources, but doesn't
1655  *      automatically enable the interrupt. It has to be done on each
1656  *      CPU using enable_percpu_irq().
1657  *
1658  *      Dev_id must be globally unique. It is a per-cpu variable, and
1659  *      the handler gets called with the interrupted CPU's instance of
1660  *      that variable.
1661  */
1662 int request_percpu_irq(unsigned int irq, irq_handler_t handler,
1663                        const char *devname, void __percpu *dev_id)
1664 {
1665         struct irqaction *action;
1666         struct irq_desc *desc;
1667         int retval;
1668 
1669         if (!dev_id)
1670                 return -EINVAL;
1671 
1672         desc = irq_to_desc(irq);
1673         if (!desc || !irq_settings_can_request(desc) ||
1674             !irq_settings_is_per_cpu_devid(desc))
1675                 return -EINVAL;
1676 
1677         action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
1678         if (!action)
1679                 return -ENOMEM;
1680 
1681         action->handler = handler;
1682         action->flags = IRQF_PERCPU | IRQF_NO_SUSPEND;
1683         action->name = devname;
1684         action->percpu_dev_id = dev_id;
1685 
1686         chip_bus_lock(desc);
1687         retval = __setup_irq(irq, desc, action);
1688         chip_bus_sync_unlock(desc);
1689 
1690         if (retval)
1691                 kfree(action);
1692 
1693         return retval;
1694 }
1695 
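
A sketch of the per-cpu flow, in the style of per-cpu timer sources:
request once with a __percpu cookie, then enable the interrupt on every
CPU that should receive it. Names and the irq number are hypothetical:

    struct my_percpu_state {                        /* hypothetical */
            unsigned long count;
    };
    static DEFINE_PER_CPU(struct my_percpu_state, my_state);

    static irqreturn_t my_percpu_handler(int irq, void *dev_id)
    {
            struct my_percpu_state *st = dev_id;    /* this CPU's instance */

            st->count++;                            /* handle the local event */
            return IRQ_HANDLED;
    }

    /* once, at init time: */
    err = request_percpu_irq(irq, my_percpu_handler, "my_percpu", &my_state);

    /* then, on each CPU (e.g. from CPU bringup code): */
    enable_percpu_irq(irq, IRQ_TYPE_NONE);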
