/* interrupt.h */
#ifndef _LINUX_INTERRUPT_H
#define _LINUX_INTERRUPT_H

#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/preempt.h>
#include <linux/cpumask.h>
#include <linux/irqreturn.h>
#include <linux/irqnr.h>
#include <linux/hardirq.h>
#include <linux/irqflags.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <linux/hrtimer.h>

#include <asm/atomic.h>
#include <asm/ptrace.h>
#include <asm/system.h>

/*
 * These correspond to the IORESOURCE_IRQ_* defines in
 * linux/ioport.h to select the interrupt line behaviour.  When
 * requesting an interrupt without specifying an IRQF_TRIGGER, the
 * setting should be assumed to be "as already configured", which
 * may be as per machine or firmware initialisation.
 */
#define IRQF_TRIGGER_NONE       0x00000000
#define IRQF_TRIGGER_RISING     0x00000001
#define IRQF_TRIGGER_FALLING    0x00000002
#define IRQF_TRIGGER_HIGH       0x00000004
#define IRQF_TRIGGER_LOW        0x00000008
#define IRQF_TRIGGER_MASK       (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW | \
                                 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)
#define IRQF_TRIGGER_PROBE      0x00000010

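/*
 * Example (sketch; foo_isr and foo_dev are hypothetical driver names):
 * requesting an edge-triggered line.  Trigger flags may be OR'ed
 * together when the line must fire on both edges:
 *
 *      ret = request_irq(irq, foo_isr,
 *                        IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
 *                        "foo", foo_dev);
 *      if (ret)
 *              return ret;
 */
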
/*
 * These flags are used only by the kernel as part of the
 * irq handling routines.
 *
 * IRQF_DISABLED - keep irqs disabled when calling the action handler
 * IRQF_SAMPLE_RANDOM - irq is used to feed the random generator
 * IRQF_SHARED - allow sharing the irq among several devices
 * IRQF_PROBE_SHARED - set by callers when they expect sharing mismatches to occur
 * IRQF_TIMER - Flag to mark this interrupt as a timer interrupt
 * IRQF_PERCPU - Interrupt is per cpu
 * IRQF_NOBALANCING - Flag to exclude this interrupt from irq balancing
 * IRQF_IRQPOLL - Interrupt is used for polling (only the interrupt that is
 *                registered first in a shared interrupt is considered for
 *                performance reasons)
 * IRQF_ONESHOT - Interrupt is not re-enabled after the hardirq handler has
 *                finished.  Used by threaded interrupts which need to keep
 *                the irq line disabled until the threaded handler has been run.
 */
#define IRQF_DISABLED           0x00000020
#define IRQF_SAMPLE_RANDOM      0x00000040
#define IRQF_SHARED             0x00000080
#define IRQF_PROBE_SHARED       0x00000100
#define IRQF_TIMER              0x00000200
#define IRQF_PERCPU             0x00000400
#define IRQF_NOBALANCING        0x00000800
#define IRQF_IRQPOLL            0x00001000
#define IRQF_ONESHOT            0x00002000

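/*
 * Example (sketch; the foo_* names and registers are hypothetical):
 * a handler suitable for a line requested with IRQF_SHARED.  A shared
 * handler must check whether its own device raised the interrupt and
 * return IRQ_NONE if not, so the other handlers on the line get a
 * chance to run:
 *
 *      static irqreturn_t foo_isr(int irq, void *dev_id)
 *      {
 *              struct foo_device *foo = dev_id;
 *
 *              if (!(readl(foo->regs + FOO_STATUS) & FOO_PENDING))
 *                      return IRQ_NONE;        (not ours)
 *              ... service the device ...
 *              return IRQ_HANDLED;
 *      }
 */
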
/*
 * Bits used by threaded handlers:
 * IRQTF_RUNTHREAD - signals that the interrupt handler thread should run
 * IRQTF_DIED      - handler thread died
 * IRQTF_WARNED    - warning "IRQ_WAKE_THREAD w/o thread_fn" has been printed
 * IRQTF_AFFINITY  - irq thread is requested to adjust affinity
 */
enum {
        IRQTF_RUNTHREAD,
        IRQTF_DIED,
        IRQTF_WARNED,
        IRQTF_AFFINITY,
};

typedef irqreturn_t (*irq_handler_t)(int, void *);

/**
 * struct irqaction - per interrupt action descriptor
 * @handler:    interrupt handler function
 * @flags:      flags (see IRQF_* above)
 * @name:       name of the device
 * @dev_id:     cookie to identify the device
 * @next:       pointer to the next irqaction for shared interrupts
 * @irq:        interrupt number
 * @dir:        pointer to the proc/irq/NN/name entry
 * @thread_fn:  interrupt handler function for threaded interrupts
 * @thread:     thread pointer for threaded interrupts
 * @thread_flags:       flags related to @thread
 */
struct irqaction {
        irq_handler_t handler;
        unsigned long flags;
        const char *name;
        void *dev_id;
        struct irqaction *next;
        int irq;
        struct proc_dir_entry *dir;
        irq_handler_t thread_fn;
        struct task_struct *thread;
        unsigned long thread_flags;
};

extern irqreturn_t no_action(int cpl, void *dev_id);

#ifdef CONFIG_GENERIC_HARDIRQS
extern int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
                     irq_handler_t thread_fn,
                     unsigned long flags, const char *name, void *dev);

static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
            const char *name, void *dev)
{
        return request_threaded_irq(irq, handler, NULL, flags, name, dev);
}

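/*
 * Example (sketch; bar_quick_check, bar_thread_fn and bar_dev are
 * hypothetical): a device whose handler must sleep (e.g. it talks to
 * the hardware over I2C) uses the threaded variant.  The hardirq
 * handler returns IRQ_WAKE_THREAD to have bar_thread_fn() run in
 * process context, and IRQF_ONESHOT keeps the line masked until the
 * thread handler has finished:
 *
 *      ret = request_threaded_irq(irq, bar_quick_check, bar_thread_fn,
 *                                 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
 *                                 "bar", bar_dev);
 */
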
extern void exit_irq_thread(void);
#else

extern int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
            const char *name, void *dev);

/*
 * Special function to avoid ifdeffery in kernel/irq/devres.c which
 * gets magically built by GENERIC_HARDIRQS=n architectures (sparc,
 * m68k). I really love these $@%#!* obvious Makefile references:
 * ../../../kernel/irq/devres.o
 */
static inline int __must_check
request_threaded_irq(unsigned int irq, irq_handler_t handler,
                     irq_handler_t thread_fn,
                     unsigned long flags, const char *name, void *dev)
{
        return request_irq(irq, handler, flags, name, dev);
}

static inline void exit_irq_thread(void) { }
#endif

extern void free_irq(unsigned int, void *);

struct device;

extern int __must_check
devm_request_threaded_irq(struct device *dev, unsigned int irq,
                          irq_handler_t handler, irq_handler_t thread_fn,
                          unsigned long irqflags, const char *devname,
                          void *dev_id);

static inline int __must_check
devm_request_irq(struct device *dev, unsigned int irq, irq_handler_t handler,
                 unsigned long irqflags, const char *devname, void *dev_id)
{
        return devm_request_threaded_irq(dev, irq, handler, NULL, irqflags,
                                         devname, dev_id);
}

extern void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id);
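
/*
 * Example (sketch; pdev, baz and baz_isr are hypothetical): the devm_
 * variants tie the irq's lifetime to a struct device, so a driver
 * probe() routine needs no matching devm_free_irq()/free_irq() in its
 * error paths or in its remove() routine:
 *
 *      ret = devm_request_irq(&pdev->dev, irq, baz_isr, IRQF_SHARED,
 *                             dev_name(&pdev->dev), baz);
 *      if (ret)
 *              return ret;
 */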

/*
 * On lockdep we don't want to enable hardirqs in hardirq
 * context. Use local_irq_enable_in_hardirq() to annotate
 * kernel code that has to do this nevertheless (pretty much
 * the only valid case is for old/broken hardware that is
 * insanely slow).
 *
 * NOTE: in theory this might break fragile code that relies
 * on hardirq delivery - in practice we don't seem to have such
 * places left. So the only effect should be slightly increased
 * irqs-off latencies.
 */
#ifdef CONFIG_LOCKDEP
# define local_irq_enable_in_hardirq()  do { } while (0)
#else
# define local_irq_enable_in_hardirq()  local_irq_enable()
#endif

extern void disable_irq_nosync(unsigned int irq);
extern void disable_irq(unsigned int irq);
extern void enable_irq(unsigned int irq);
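
/*
 * Example (sketch): disable_irq() waits for any handlers in progress
 * to finish, so it can bracket reconfiguration that must not race
 * with the ISR.  disable_irq_nosync() only masks the line and may
 * return while a handler is still running; it is the variant that is
 * safe to call from within the handler itself:
 *
 *      disable_irq(dev->irq);
 *      ... reprogram the device ...
 *      enable_irq(dev->irq);
 */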

/* The following three functions are for core kernel use only. */
#ifdef CONFIG_GENERIC_HARDIRQS
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
#ifdef CONFIG_PM_SLEEP
extern int check_wakeup_irqs(void);
#else
static inline int check_wakeup_irqs(void) { return 0; }
#endif
#else
static inline void suspend_device_irqs(void) { }
static inline void resume_device_irqs(void) { }
static inline int check_wakeup_irqs(void) { return 0; }
#endif

#if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_HARDIRQS)

extern cpumask_var_t irq_default_affinity;

extern int irq_set_affinity(unsigned int irq, const struct cpumask *cpumask);
extern int irq_can_set_affinity(unsigned int irq);
extern int irq_select_affinity(unsigned int irq);

#else /* CONFIG_SMP */

static inline int irq_set_affinity(unsigned int irq, const struct cpumask *m)
{
        return -EINVAL;
}

static inline int irq_can_set_affinity(unsigned int irq)
{
        return 0;
}

static inline int irq_select_affinity(unsigned int irq) { return 0; }

#endif /* CONFIG_SMP && CONFIG_GENERIC_HARDIRQS */
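
/*
 * Example (sketch): steering an irq to CPU 2, guarding against lines
 * whose irq chip cannot move them:
 *
 *      if (irq_can_set_affinity(irq))
 *              irq_set_affinity(irq, cpumask_of(2));
 */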

#ifdef CONFIG_GENERIC_HARDIRQS
/*
 * Special lockdep variants of irq disabling/enabling.
 * These should be used for locking constructs where the caller knows
 * that a particular irq is disabled, and that the irq context in
 * question is the only irq-context user of a lock, so that it is
 * safe to take the lock in the irq-disabled section without
 * disabling hardirqs.
 *
 * On !CONFIG_LOCKDEP they are equivalent to the normal
 * irq disable/enable methods.
 */
static inline void disable_irq_nosync_lockdep(unsigned int irq)
{
        disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
        local_irq_disable();
#endif
}

static inline void disable_irq_nosync_lockdep_irqsave(unsigned int irq, unsigned long *flags)
{
        disable_irq_nosync(irq);
#ifdef CONFIG_LOCKDEP
        local_irq_save(*flags);
#endif
}

static inline void disable_irq_lockdep(unsigned int irq)
{
        disable_irq(irq);
#ifdef CONFIG_LOCKDEP
        local_irq_disable();
#endif
}

static inline void enable_irq_lockdep(unsigned int irq)
{
#ifdef CONFIG_LOCKDEP
        local_irq_enable();
#endif
        enable_irq(irq);
}

static inline void enable_irq_lockdep_irqrestore(unsigned int irq, unsigned long *flags)
{
#ifdef CONFIG_LOCKDEP
        local_irq_restore(*flags);
#endif
        enable_irq(irq);
}

/* IRQ wakeup (PM) control: */
extern int set_irq_wake(unsigned int irq, unsigned int on);

static inline int enable_irq_wake(unsigned int irq)
{
        return set_irq_wake(irq, 1);
}

static inline int disable_irq_wake(unsigned int irq)
{
        return set_irq_wake(irq, 0);
}
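
/*
 * Example (sketch; foo is hypothetical, device_may_wakeup() is the
 * driver-model wakeup test): a driver whose interrupt should wake the
 * system typically flips wake state in its suspend/resume callbacks:
 *
 *      if (device_may_wakeup(dev))
 *              enable_irq_wake(foo->irq);      on suspend
 *      ...
 *      if (device_may_wakeup(dev))
 *              disable_irq_wake(foo->irq);     on resume
 */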

#else /* !CONFIG_GENERIC_HARDIRQS */
/*
 * NOTE: non-genirq architectures, if they want to support the lock
 * validator need to define the methods below in their asm/irq.h
 * files, under an #ifdef CONFIG_LOCKDEP section.
 */
#ifndef CONFIG_LOCKDEP
#  define disable_irq_nosync_lockdep(irq)       disable_irq_nosync(irq)
#  define disable_irq_nosync_lockdep_irqsave(irq, flags) \
                                                disable_irq_nosync(irq)
#  define disable_irq_lockdep(irq)              disable_irq(irq)
#  define enable_irq_lockdep(irq)               enable_irq(irq)
#  define enable_irq_lockdep_irqrestore(irq, flags) \
                                                enable_irq(irq)
#endif

static inline int enable_irq_wake(unsigned int irq)
{
        return 0;
}

static inline int disable_irq_wake(unsigned int irq)
{
        return 0;
}
#endif /* CONFIG_GENERIC_HARDIRQS */

#ifndef __ARCH_SET_SOFTIRQ_PENDING
#define set_softirq_pending(x) (local_softirq_pending() = (x))
#define or_softirq_pending(x)  (local_softirq_pending() |= (x))
#endif

/* Some architectures might implement lazy enabling/disabling of
 * interrupts. In some cases, such as stop_machine, we might want
 * to ensure that after a local_irq_disable(), interrupts have
 * really been disabled in hardware. Such architectures need to
 * implement the following hook.
 */
#ifndef hard_irq_disable
#define hard_irq_disable()      do { } while (0)
#endif

/* Please avoid allocating new softirqs unless you really need very
   high frequency threaded job scheduling.  For almost all purposes
   tasklets are more than enough; e.g. all serial device BHs et
   al. should be converted to tasklets, not to softirqs.
 */

enum
{
        HI_SOFTIRQ=0,
        TIMER_SOFTIRQ,
        NET_TX_SOFTIRQ,
        NET_RX_SOFTIRQ,
        BLOCK_SOFTIRQ,
        BLOCK_IOPOLL_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
        HRTIMER_SOFTIRQ,
        RCU_SOFTIRQ,    /* Preferably RCU should always be the last softirq */

        NR_SOFTIRQS
};

/* map softirq index to softirq name. update 'softirq_to_name' in
 * kernel/softirq.c when adding a new softirq.
 */
extern char *softirq_to_name[NR_SOFTIRQS];

/* softirq mask and active fields moved to irq_cpustat_t in
 * asm/hardirq.h to get better cache usage.  KAO
 */

struct softirq_action
{
        void    (*action)(struct softirq_action *);
};

asmlinkage void do_softirq(void);
asmlinkage void __do_softirq(void);
extern void open_softirq(int nr, void (*action)(struct softirq_action *));
extern void softirq_init(void);
#define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
extern void raise_softirq_irqoff(unsigned int nr);
extern void raise_softirq(unsigned int nr);
extern void wakeup_softirqd(void);
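
/*
 * Example (sketch of core-kernel usage; drivers should use tasklets
 * instead): a subsystem registers its action once at init time, then
 * raises the softirq whenever there is work, much as kernel/softirq.c
 * does for tasklets:
 *
 *      open_softirq(TASKLET_SOFTIRQ, tasklet_action);
 *      ...
 *      raise_softirq(TASKLET_SOFTIRQ);         irqs may be on
 *      raise_softirq_irqoff(TASKLET_SOFTIRQ);  irqs already disabled
 */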

/* This is the worklist that queues up per-cpu softirq work.
 *
 * send_remote_softirq() adds work to these lists, and
 * the softirq handler itself dequeues from them.  The queues
 * are protected by disabling local cpu interrupts and they must
 * only be accessed by the local cpu that they are for.
 */
DECLARE_PER_CPU(struct list_head [NR_SOFTIRQS], softirq_work_list);

/* Try to send a softirq to a remote cpu.  If this cannot be done, the
 * work will be queued to the local cpu.
 */
extern void send_remote_softirq(struct call_single_data *cp, int cpu, int softirq);

/* Like send_remote_softirq(), but the caller must disable local cpu interrupts
 * and compute the current cpu, passed in as 'this_cpu'.
 */
extern void __send_remote_softirq(struct call_single_data *cp, int cpu,
                                  int this_cpu, int softirq);

/* Tasklets --- multithreaded analogue of BHs.

   The main feature distinguishing them from generic softirqs: a given
   tasklet runs on only one CPU at a time.

   The main feature distinguishing them from BHs: different tasklets
   may run simultaneously on different CPUs.

   Properties:
   * If tasklet_schedule() is called, then the tasklet is guaranteed
     to be executed on some cpu at least once after this.
   * If the tasklet is already scheduled, but its execution has not yet
     started, it will be executed only once.
   * If this tasklet is already running on another CPU (or schedule is called
     from the tasklet itself), it is rescheduled for later.
   * A tasklet is strictly serialized wrt itself, but not
     wrt other tasklets.  If a client needs some inter-tasklet
     synchronization, it must provide it with spinlocks.
 */

struct tasklet_struct
{
        struct tasklet_struct *next;
        unsigned long state;
        atomic_t count;
        void (*func)(unsigned long);
        unsigned long data;
};

#define DECLARE_TASKLET(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(0), func, data }

#define DECLARE_TASKLET_DISABLED(name, func, data) \
struct tasklet_struct name = { NULL, 0, ATOMIC_INIT(1), func, data }

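/*
 * Example (sketch; the foo_* names are hypothetical): the classic
 * split between a minimal hardirq handler and a tasklet that does the
 * heavier work in softirq context:
 *
 *      static void foo_do_work(unsigned long data);
 *      static DECLARE_TASKLET(foo_tasklet, foo_do_work, 0);
 *
 *      static irqreturn_t foo_isr(int irq, void *dev_id)
 *      {
 *              ... ack the hardware ...
 *              tasklet_schedule(&foo_tasklet);
 *              return IRQ_HANDLED;
 *      }
 */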

enum
{
        TASKLET_STATE_SCHED,    /* Tasklet is scheduled for execution */
        TASKLET_STATE_RUN       /* Tasklet is running (SMP only) */
};

#ifdef CONFIG_SMP
static inline int tasklet_trylock(struct tasklet_struct *t)
{
        return !test_and_set_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock(struct tasklet_struct *t)
{
        smp_mb__before_clear_bit();
        clear_bit(TASKLET_STATE_RUN, &(t)->state);
}

static inline void tasklet_unlock_wait(struct tasklet_struct *t)
{
        while (test_bit(TASKLET_STATE_RUN, &(t)->state)) { barrier(); }
}
#else
#define tasklet_trylock(t) 1
#define tasklet_unlock_wait(t) do { } while (0)
#define tasklet_unlock(t) do { } while (0)
#endif

extern void __tasklet_schedule(struct tasklet_struct *t);

static inline void tasklet_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_schedule(t);
}

extern void __tasklet_hi_schedule(struct tasklet_struct *t);

static inline void tasklet_hi_schedule(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_hi_schedule(t);
}

extern void __tasklet_hi_schedule_first(struct tasklet_struct *t);

/*
 * This version avoids touching any other tasklets. Needed for kmemcheck
 * in order not to take any page faults while enqueueing this tasklet;
 * consider VERY carefully whether you really need this or
 * tasklet_hi_schedule()...
 */
static inline void tasklet_hi_schedule_first(struct tasklet_struct *t)
{
        if (!test_and_set_bit(TASKLET_STATE_SCHED, &t->state))
                __tasklet_hi_schedule_first(t);
}


static inline void tasklet_disable_nosync(struct tasklet_struct *t)
{
        atomic_inc(&t->count);
        smp_mb__after_atomic_inc();
}

static inline void tasklet_disable(struct tasklet_struct *t)
{
        tasklet_disable_nosync(t);
        tasklet_unlock_wait(t);
        smp_mb();
}

static inline void tasklet_enable(struct tasklet_struct *t)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&t->count);
}

static inline void tasklet_hi_enable(struct tasklet_struct *t)
{
        smp_mb__before_atomic_dec();
        atomic_dec(&t->count);
}

extern void tasklet_kill(struct tasklet_struct *t);
extern void tasklet_kill_immediate(struct tasklet_struct *t, unsigned int cpu);
extern void tasklet_init(struct tasklet_struct *t,
                         void (*func)(unsigned long), unsigned long data);

struct tasklet_hrtimer {
        struct hrtimer          timer;
        struct tasklet_struct   tasklet;
        enum hrtimer_restart    (*function)(struct hrtimer *);
};

extern void
tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                     enum hrtimer_restart (*function)(struct hrtimer *),
                     clockid_t which_clock, enum hrtimer_mode mode);

static inline
int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
                          const enum hrtimer_mode mode)
{
        return hrtimer_start(&ttimer->timer, time, mode);
}

static inline
void tasklet_hrtimer_cancel(struct tasklet_hrtimer *ttimer)
{
        hrtimer_cancel(&ttimer->timer);
        tasklet_kill(&ttimer->tasklet);
}
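
/*
 * Example (sketch; baz and baz_timer_fn are hypothetical): the
 * function handed to tasklet_hrtimer_init() runs from the tasklet,
 * i.e. in softirq context rather than hardirq context, which is the
 * point of this wrapper:
 *
 *      tasklet_hrtimer_init(&baz->ttimer, baz_timer_fn,
 *                           CLOCK_MONOTONIC, HRTIMER_MODE_REL);
 *      tasklet_hrtimer_start(&baz->ttimer,
 *                            ktime_set(0, 10 * NSEC_PER_MSEC),
 *                            HRTIMER_MODE_REL);
 */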

/*
 * Autoprobing for irqs:
 *
 * probe_irq_on() and probe_irq_off() provide robust primitives
 * for accurate IRQ probing during kernel initialization.  They are
 * reasonably simple to use, are not "fooled" by spurious interrupts,
 * and, unlike other attempts at IRQ probing, they do not get hung on
 * stuck interrupts (such as unused PS2 mouse interfaces on ASUS boards).
 *
 * For reasonably foolproof probing, use them as follows:
 *
 * 1. clear and/or mask the device's internal interrupt.
 * 2. local_irq_enable();
 * 3. irqs = probe_irq_on();      // "take over" all unassigned idle IRQs
 * 4. enable the device and cause it to trigger an interrupt.
 * 5. wait for the device to interrupt, using non-intrusive polling or a delay.
 * 6. irq = probe_irq_off(irqs);  // get IRQ number, 0=none, negative=multiple
 * 7. service the device to clear its pending interrupt.
 * 8. loop again if paranoia is required.
 *
 * probe_irq_on() returns a mask of allocated irqs.
 *
 * probe_irq_off() takes the mask as a parameter,
 * and returns the irq number which occurred,
 * or zero if none occurred, or a negative irq number
 * if more than one irq occurred.
 */

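/*
 * A sketch of the recipe above for a hypothetical legacy ISA device
 * (the foo_* helpers are made up):
 *
 *      unsigned long mask;
 *      int irq;
 *
 *      foo_mask_irq(dev);                      step 1
 *      local_irq_enable();                     step 2
 *      mask = probe_irq_on();                  step 3
 *      foo_trigger_irq(dev);                   step 4
 *      udelay(100);                            step 5
 *      irq = probe_irq_off(mask);              step 6
 *      foo_ack_irq(dev);                       step 7
 *      if (irq <= 0)
 *              ... probing failed or was ambiguous ...
 */
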
#if defined(CONFIG_GENERIC_HARDIRQS) && !defined(CONFIG_GENERIC_IRQ_PROBE)
static inline unsigned long probe_irq_on(void)
{
        return 0;
}
static inline int probe_irq_off(unsigned long val)
{
        return 0;
}
static inline unsigned int probe_irq_mask(unsigned long val)
{
        return 0;
}
#else
extern unsigned long probe_irq_on(void);        /* returns 0 on failure */
extern int probe_irq_off(unsigned long);        /* returns 0 or negative on failure */
extern unsigned int probe_irq_mask(unsigned long);      /* returns mask of ISA interrupts */
#endif

#ifdef CONFIG_PROC_FS
/* Initialize /proc/irq/ */
extern void init_irq_proc(void);
#else
static inline void init_irq_proc(void)
{
}
#endif

struct seq_file;
int show_interrupts(struct seq_file *p, void *v);

struct irq_desc;

extern int early_irq_init(void);
extern int arch_probe_nr_irqs(void);
extern int arch_early_irq_init(void);
extern int arch_init_chip_data(struct irq_desc *desc, int node);

#endif /* _LINUX_INTERRUPT_H */
