LCOV - code coverage report

Current view: top level - include/linux - spinlock.h (source / functions)
Test:         coverage.info
Date:         2017-01-25

                 Hit   Total   Coverage
Lines:            12      12    100.0 %
Functions:         6       6    100.0 %

          Line data    Source code
       1             : #ifndef __LINUX_SPINLOCK_H
       2             : #define __LINUX_SPINLOCK_H
       3             : 
       4             : /*
       5             :  * include/linux/spinlock.h - generic spinlock/rwlock declarations
       6             :  *
       7             :  * Here's the role of the various spinlock/rwlock-related include files:
       8             :  *
       9             :  * on SMP builds:
      10             :  *
      11             :  *  asm/spinlock_types.h: contains the arch_spinlock_t/arch_rwlock_t and the
      12             :  *                        initializers
      13             :  *
      14             :  *  linux/spinlock_types.h:
      15             :  *                        defines the generic type and initializers
      16             :  *
      17             :  *  asm/spinlock.h:       contains the arch_spin_*()/etc. low-level
      18             :  *                        implementations, mostly inline assembly code
      19             :  *
      20             :  *   (also included on UP-debug builds:)
      21             :  *
      22             :  *  linux/spinlock_api_smp.h:
      23             :  *                        contains the prototypes for the _spin_*() APIs.
      24             :  *
      25             :  *  linux/spinlock.h:     builds the final spin_*() APIs.
      26             :  *
      27             :  * on UP builds:
      28             :  *
      29             :  *  linux/spinlock_types_up.h:
      30             :  *                        contains the generic, simplified UP spinlock type.
      31             :  *                        (which is an empty structure on non-debug builds)
      32             :  *
      33             :  *  linux/spinlock_types.h:
      34             :  *                        defines the generic type and initializers
      35             :  *
      36             :  *  linux/spinlock_up.h:
      37             :  *                        contains the arch_spin_*()/etc. versions for UP
      38             :  *                        builds (which are NOPs on non-debug, non-preempt
      39             :  *                        builds)
      40             :  *
      41             :  *   (included on UP-non-debug builds:)
      42             :  *
      43             :  *  linux/spinlock_api_up.h:
      44             :  *                        builds the _spin_*() APIs.
      45             :  *
      46             :  *  linux/spinlock.h:     builds the final spin_*() APIs.
      47             :  */
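/*
 * Editor's sketch (not part of the original header): what the final
 * spin_*() API built here looks like from a caller's point of view.
 * The structure and function names below are hypothetical.
 */
#if 0
struct demo_counter {
        spinlock_t lock;        /* initialized with spin_lock_init() */
        unsigned long count;
};

static void demo_counter_inc(struct demo_counter *c)
{
        spin_lock(&c->lock);    /* spins until acquired; never sleeps */
        c->count++;
        spin_unlock(&c->lock);
}
#endif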
      48             : 
      49             : #include <linux/typecheck.h>
      50             : #include <linux/preempt.h>
      51             : #include <linux/linkage.h>
      52             : #include <linux/compiler.h>
      53             : #include <linux/thread_info.h>
      54             : #include <linux/kernel.h>
      55             : #include <linux/stringify.h>
      56             : #include <linux/bottom_half.h>
      57             : 
      58             : #include <asm/system.h>
      59             : 
      60             : /*
      61             :  * Must define these before including other files; inline functions need them.
      62             :  */
      63             : #define LOCK_SECTION_NAME ".text.lock."KBUILD_BASENAME
      64             : 
      65             : #define LOCK_SECTION_START(extra)               \
      66             :         ".subsection 1\n\t"                     \
      67             :         extra                                   \
      68             :         ".ifndef " LOCK_SECTION_NAME "\n\t"     \
      69             :         LOCK_SECTION_NAME ":\n\t"               \
      70             :         ".endif\n"
      71             : 
      72             : #define LOCK_SECTION_END                        \
      73             :         ".previous\n\t"
      74             : 
      75             : #define __lockfunc __attribute__((section(".spinlock.text")))
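/*
 * Editor's note (illustrative, not from this header): __lockfunc is used
 * when defining the out-of-line lock functions so that they land in the
 * ".spinlock.text" section. A sketch modeled on kernel/spinlock.c of this
 * era; the exact signature may differ:
 */
#if 0
void __lockfunc _raw_spin_lock(raw_spinlock_t *lock)
{
        __raw_spin_lock(lock);
}
#endif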
      76             : 
      77             : /*
      78             :  * Pull the arch_spinlock_t and arch_rwlock_t definitions:
      79             :  */
      80             : #include <linux/spinlock_types.h>
      81             : 
      82             : /*
      83             :  * Pull the arch_spin_*() functions/declarations (UP-nondebug doesn't need them):
      84             :  */
      85             : #ifdef CONFIG_SMP
      86             : # include <asm/spinlock.h>
      87             : #else
      88             : # include <linux/spinlock_up.h>
      89             : #endif
      90             : 
      91             : #ifdef CONFIG_DEBUG_SPINLOCK
      92             :   extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
      93             :                                    struct lock_class_key *key);
      94             : # define raw_spin_lock_init(lock)                               \
      95             : do {                                                            \
      96             :         static struct lock_class_key __key;                     \
      97             :                                                                 \
      98             :         __raw_spin_lock_init((lock), #lock, &__key);                \
      99             : } while (0)
     100             : 
     101             : #else
     102             : # define raw_spin_lock_init(lock)                               \
     103             :         do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
     104             : #endif
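/*
 * Editor's sketch: runtime initialization of a raw spinlock. Under
 * CONFIG_DEBUG_SPINLOCK, each init site gets its own static
 * lock_class_key (see the macro above), which lets the debugging code
 * distinguish locks by where they were initialized. Names are
 * hypothetical.
 */
#if 0
static raw_spinlock_t demo_raw_lock;

static void demo_init(void)
{
        raw_spin_lock_init(&demo_raw_lock);
}
#endif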
     105             : 
     106             : #define raw_spin_is_locked(lock)        arch_spin_is_locked(&(lock)->raw_lock)
     107             : 
     108             : #ifdef CONFIG_GENERIC_LOCKBREAK
     109             : #define raw_spin_is_contended(lock) ((lock)->break_lock)
     110             : #else
     111             : 
     112             : #ifdef arch_spin_is_contended
     113             : #define raw_spin_is_contended(lock)     arch_spin_is_contended(&(lock)->raw_lock)
     114             : #else
     115             : #define raw_spin_is_contended(lock)     (((void)(lock), 0))
     116             : #endif /*arch_spin_is_contended*/
     117             : #endif
     118             : 
     119             : /* Taking the lock does not imply a full memory barrier. */
     120             : #ifndef ARCH_HAS_SMP_MB_AFTER_LOCK
     121             : static inline void smp_mb__after_lock(void) { smp_mb(); }
     122             : #endif
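/*
 * Editor's sketch: spin_lock() is only an acquire barrier, so code that
 * needs a full barrier after taking a lock pairs it with
 * smp_mb__after_lock(). Hypothetical usage:
 */
#if 0
static void demo_lock_then_fence(spinlock_t *lock)
{
        spin_lock(lock);
        smp_mb__after_lock();   /* full barrier before reading remote state */
        /* ... */
        spin_unlock(lock);
}
#endif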
     123             : 
     124             : /**
     125             :  * raw_spin_unlock_wait - wait until the spinlock gets unlocked
     126             :  * @lock: the spinlock in question.
     127             :  */
     128             : #define raw_spin_unlock_wait(lock)      arch_spin_unlock_wait(&(lock)->raw_lock)
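/*
 * Editor's sketch: raw_spin_unlock_wait() spins until any current holder
 * drops the lock, without ever acquiring it -- historically used in
 * teardown paths to let an in-flight critical section finish. The
 * function name below is hypothetical.
 */
#if 0
static void demo_wait_for_release(raw_spinlock_t *lock)
{
        raw_spin_unlock_wait(lock);     /* returns once the lock is free */
}
#endif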
     129             : 
     130             : #ifdef CONFIG_DEBUG_SPINLOCK
     131             :  extern void do_raw_spin_lock(raw_spinlock_t *lock);
     132             : #define do_raw_spin_lock_flags(lock, flags) do_raw_spin_lock(lock)
     133             :  extern int do_raw_spin_trylock(raw_spinlock_t *lock);
     134             :  extern void do_raw_spin_unlock(raw_spinlock_t *lock);
     135             : #else
     136             : static inline void do_raw_spin_lock(raw_spinlock_t *lock)
     137             : {
     138             :         arch_spin_lock(&lock->raw_lock);
     139             : }
     140             : 
     141             : static inline void
     142             : do_raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long *flags)
     143             : {
     144             :         arch_spin_lock_flags(&lock->raw_lock, *flags);
     145             : }
     146             : 
     147             : static inline int do_raw_spin_trylock(raw_spinlock_t *lock)
     148             : {
     149             :         return arch_spin_trylock(&(lock)->raw_lock);
     150             : }
     151             : 
     152             : static inline void do_raw_spin_unlock(raw_spinlock_t *lock)
     153             : {
     154          18 :         arch_spin_unlock(&lock->raw_lock);
     155           9 : }
     156             : #endif
     157             : 
     158             : /*
     159             :  * Define the various spin_lock methods.  Note that we define these
     160             :  * regardless of whether CONFIG_SMP or CONFIG_PREEMPT is set.  The
     161             :  * various methods are defined as nops when they are not
     162             :  * required.
     163             :  */
     164             : #define raw_spin_trylock(lock)  __cond_lock(lock, _raw_spin_trylock(lock))
     165             : 
     166             : #define raw_spin_lock(lock)     _raw_spin_lock(lock)
     167             : 
     168             : #ifdef CONFIG_DEBUG_LOCK_ALLOC
     169             : # define raw_spin_lock_nested(lock, subclass) \
     170             :         _raw_spin_lock_nested(lock, subclass)
     171             : 
     172             : # define raw_spin_lock_nest_lock(lock, nest_lock)                       \
     173             :          do {                                                           \
     174             :                  typecheck(struct lockdep_map *, &(nest_lock)->dep_map);\
     175             :                  _raw_spin_lock_nest_lock(lock, &(nest_lock)->dep_map);  \
     176             :          } while (0)
     177             : #else
     178             : # define raw_spin_lock_nested(lock, subclass)           _raw_spin_lock(lock)
     179             : # define raw_spin_lock_nest_lock(lock, nest_lock)       _raw_spin_lock(lock)
     180             : #endif
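/*
 * Editor's sketch: raw_spin_lock_nested() tells lockdep that taking two
 * locks of the same class in a fixed order is intentional, e.g. when
 * locking two instances of the same structure. SINGLE_DEPTH_NESTING comes
 * from <linux/lockdep.h>; the function name is hypothetical.
 */
#if 0
static void demo_lock_pair(raw_spinlock_t *outer, raw_spinlock_t *inner)
{
        raw_spin_lock(outer);
        raw_spin_lock_nested(inner, SINGLE_DEPTH_NESTING);
        /* ... */
        raw_spin_unlock(inner);
        raw_spin_unlock(outer);
}
#endif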
     181             : 
     182             : #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
     183             : 
     184             : #define raw_spin_lock_irqsave(lock, flags)                      \
     185             :         do {                                            \
     186             :                 typecheck(unsigned long, flags);        \
     187             :                 flags = _raw_spin_lock_irqsave(lock);   \
     188             :         } while (0)
     189             : 
     190             : #ifdef CONFIG_DEBUG_LOCK_ALLOC
     191             : #define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
     192             :         do {                                                            \
     193             :                 typecheck(unsigned long, flags);                        \
     194             :                 flags = _raw_spin_lock_irqsave_nested(lock, subclass);  \
     195             :         } while (0)
     196             : #else
     197             : #define raw_spin_lock_irqsave_nested(lock, flags, subclass)             \
     198             :         do {                                                            \
     199             :                 typecheck(unsigned long, flags);                        \
     200             :                 flags = _raw_spin_lock_irqsave(lock);                   \
     201             :         } while (0)
     202             : #endif
     203             : 
     204             : #else
     205             : 
     206             : #define raw_spin_lock_irqsave(lock, flags)              \
     207             :         do {                                            \
     208             :                 typecheck(unsigned long, flags);        \
     209             :                 _raw_spin_lock_irqsave(lock, flags);    \
     210             :         } while (0)
     211             : 
     212             : #define raw_spin_lock_irqsave_nested(lock, flags, subclass)     \
     213             :         raw_spin_lock_irqsave(lock, flags)
     214             : 
     215             : #endif
     216             : 
     217             : #define raw_spin_lock_irq(lock)         _raw_spin_lock_irq(lock)
     218             : #define raw_spin_lock_bh(lock)          _raw_spin_lock_bh(lock)
     219             : #define raw_spin_unlock(lock)           _raw_spin_unlock(lock)
     220             : #define raw_spin_unlock_irq(lock)       _raw_spin_unlock_irq(lock)
     221             : 
     222             : #define raw_spin_unlock_irqrestore(lock, flags)         \
     223             :         do {                                                    \
     224             :                 typecheck(unsigned long, flags);                \
     225             :                 _raw_spin_unlock_irqrestore(lock, flags);       \
     226             :         } while (0)
     227             : #define raw_spin_unlock_bh(lock)        _raw_spin_unlock_bh(lock)
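/*
 * Editor's sketch: the canonical irqsave/irqrestore pairing. "flags" must
 * be an unsigned long in the caller's frame (the typecheck() above
 * enforces this); it records the previous local interrupt state. The
 * function name is hypothetical.
 */
#if 0
static void demo_irqsafe_update(raw_spinlock_t *lock)
{
        unsigned long flags;

        raw_spin_lock_irqsave(lock, flags);
        /* critical section, safe against local interrupt handlers */
        raw_spin_unlock_irqrestore(lock, flags);
}
#endif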
     228             : 
     229             : #define raw_spin_trylock_bh(lock) \
     230             :         __cond_lock(lock, _raw_spin_trylock_bh(lock))
     231             : 
     232             : #define raw_spin_trylock_irq(lock) \
     233             : ({ \
     234             :         local_irq_disable(); \
     235             :         raw_spin_trylock(lock) ? \
     236             :         1 : ({ local_irq_enable(); 0;  }); \
     237             : })
     238             : 
     239             : #define raw_spin_trylock_irqsave(lock, flags) \
     240             : ({ \
     241             :         local_irq_save(flags); \
     242             :         raw_spin_trylock(lock) ? \
     243             :         1 : ({ local_irq_restore(flags); 0; }); \
     244             : })
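/*
 * Editor's sketch: raw_spin_trylock_irqsave() either returns 1 with the
 * lock held and interrupts disabled, or 0 with the saved interrupt state
 * already restored. Names below are hypothetical.
 */
#if 0
static int demo_try_update(raw_spinlock_t *lock)
{
        unsigned long flags;

        if (!raw_spin_trylock_irqsave(lock, flags))
                return 0;
        /* got the lock, interrupts off */
        raw_spin_unlock_irqrestore(lock, flags);
        return 1;
}
#endif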
     245             : 
     246             : /**
     247             :  * raw_spin_can_lock - would raw_spin_trylock() succeed?
     248             :  * @lock: the spinlock in question.
     249             :  */
     250             : #define raw_spin_can_lock(lock) (!raw_spin_is_locked(lock))
     251             : 
     252             : /* Include rwlock functions */
     253             : #include <linux/rwlock.h>
     254             : 
     255             : /*
     256             :  * Pull the _spin_*()/_read_*()/_write_*() functions/declarations:
     257             :  */
     258             : #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
     259             : # include <linux/spinlock_api_smp.h>
     260             : #else
     261             : # include <linux/spinlock_api_up.h>
     262             : #endif
     263             : 
     264             : /*
     265             :  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
     266             :  */
     267             : 
     268             : static inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
     269             : {
     270           1 :         return &lock->rlock;
     271             : }
     272             : 
     273             : #define spin_lock_init(_lock)                           \
     274             : do {                                                    \
     275             :         spinlock_check(_lock);                          \
     276             :         raw_spin_lock_init(&(_lock)->rlock);             \
     277             : } while (0)
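/*
 * Editor's sketch: the two usual ways to initialize a spinlock_t --
 * statically with DEFINE_SPINLOCK() (from <linux/spinlock_types.h>) or at
 * runtime with spin_lock_init(). Names below are hypothetical.
 */
#if 0
static DEFINE_SPINLOCK(demo_static_lock);

struct demo_object {
        spinlock_t lock;
};

static void demo_object_setup(struct demo_object *obj)
{
        spin_lock_init(&obj->lock);
}
#endif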
     278             : 
     279             : static inline void spin_lock(spinlock_t *lock)
     280             : {
     281           5 :         raw_spin_lock(&lock->rlock);
     282           5 : }
     283             : 
     284             : static inline void spin_lock_bh(spinlock_t *lock)
     285             : {
     286           1 :         raw_spin_lock_bh(&lock->rlock);
     287           1 : }
     288             : 
     289             : static inline int spin_trylock(spinlock_t *lock)
     290             : {
     291             :         return raw_spin_trylock(&lock->rlock);
     292             : }
     293             : 
     294             : #define spin_lock_nested(lock, subclass)                        \
     295             : do {                                                            \
     296             :         raw_spin_lock_nested(spinlock_check(lock), subclass);   \
     297             : } while (0)
     298             : 
     299             : #define spin_lock_nest_lock(lock, nest_lock)                            \
     300             : do {                                                                    \
     301             :         raw_spin_lock_nest_lock(spinlock_check(lock), nest_lock);       \
     302             : } while (0)
     303             : 
     304             : static inline void spin_lock_irq(spinlock_t *lock)
     305             : {
     306             :         raw_spin_lock_irq(&lock->rlock);
     307             : }
     308             : 
     309             : #define spin_lock_irqsave(lock, flags)                          \
     310             : do {                                                            \
     311             :         raw_spin_lock_irqsave(spinlock_check(lock), flags);     \
     312             : } while (0)
     313             : 
     314             : #define spin_lock_irqsave_nested(lock, flags, subclass)                 \
     315             : do {                                                                    \
     316             :         raw_spin_lock_irqsave_nested(spinlock_check(lock), flags, subclass); \
     317             : } while (0)
     318             : 
     319             : static inline void spin_unlock(spinlock_t *lock)
     320             : {
     321          18 :         raw_spin_unlock(&lock->rlock);
     322           9 : }
     323             : 
     324             : static inline void spin_unlock_bh(spinlock_t *lock)
     325             : {
     326           1 :         raw_spin_unlock_bh(&lock->rlock);
     327           1 : }
     328             : 
     329             : static inline void spin_unlock_irq(spinlock_t *lock)
     330             : {
     331             :         raw_spin_unlock_irq(&lock->rlock);
     332             : }
     333             : 
     334             : static inline void spin_unlock_irqrestore(spinlock_t *lock, unsigned long flags)
     335             : {
     336             :         raw_spin_unlock_irqrestore(&lock->rlock, flags);
     337             : }
     338             : 
     339             : static inline int spin_trylock_bh(spinlock_t *lock)
     340             : {
     341             :         return raw_spin_trylock_bh(&lock->rlock);
     342             : }
     343             : 
     344             : static inline int spin_trylock_irq(spinlock_t *lock)
     345             : {
     346             :         return raw_spin_trylock_irq(&lock->rlock);
     347             : }
     348             : 
     349             : #define spin_trylock_irqsave(lock, flags)                       \
     350             : ({                                                              \
     351             :         raw_spin_trylock_irqsave(spinlock_check(lock), flags); \
     352             : })
     353             : 
     354             : static inline void spin_unlock_wait(spinlock_t *lock)
     355             : {
     356             :         raw_spin_unlock_wait(&lock->rlock);
     357             : }
     358             : 
     359             : static inline int spin_is_locked(spinlock_t *lock)
     360             : {
     361             :         return raw_spin_is_locked(&lock->rlock);
     362             : }
     363             : 
     364             : static inline int spin_is_contended(spinlock_t *lock)
     365             : {
     366             :         return raw_spin_is_contended(&lock->rlock);
     367             : }
     368             : 
     369             : static inline int spin_can_lock(spinlock_t *lock)
     370             : {
     371             :         return raw_spin_can_lock(&lock->rlock);
     372             : }
     373             : 
     374             : static inline void assert_spin_locked(spinlock_t *lock)
     375             : {
     376             :         assert_raw_spin_locked(&lock->rlock);
     377             : }
     378             : 
     379             : /*
     380             :  * Pull the atomic_t declaration:
     381             :  * (asm-mips/atomic.h needs the above definitions)
     382             :  */
     383             : #include <asm/atomic.h>
     384             : /**
     385             :  * atomic_dec_and_lock - lock on reaching reference count zero
     386             :  * @atomic: the atomic counter
     387             :  * @lock: the spinlock in question
     388             :  *
     389             :  * Decrements @atomic by 1.  If the result is 0, returns true and locks
     390             :  * @lock.  Returns false for all other cases.
     391             :  */
     392             : extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
     393           1 : #define atomic_dec_and_lock(atomic, lock) \
     394             :                 __cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
     395             : 
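/*
 * Editor's sketch: the refcount-release pattern atomic_dec_and_lock()
 * exists for -- take the list lock only on the final put, so the object
 * is unlinked and freed atomically with respect to lookups. The structure
 * and names below are hypothetical.
 */
#if 0
static void demo_obj_put(struct demo_obj *obj)
{
        if (atomic_dec_and_lock(&obj->refcount, &demo_list_lock)) {
                list_del(&obj->node);
                spin_unlock(&demo_list_lock);
                kfree(obj);
        }
}
#endif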
     396             : #endif /* __LINUX_SPINLOCK_H */

Generated by: LCOV version 1.10