#ifndef __LINUX_PERCPU_H
#define __LINUX_PERCPU_H

#include <linux/preempt.h>
#include <linux/slab.h> /* For kmalloc() */
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/pfn.h>

#include <asm/percpu.h>

/* enough to cover all DEFINE_PER_CPUs in modules */
#ifdef CONFIG_MODULES
#define PERCPU_MODULE_RESERVE           (8 << 10)
#else
#define PERCPU_MODULE_RESERVE           0
#endif

#ifndef PERCPU_ENOUGH_ROOM
#define PERCPU_ENOUGH_ROOM                                              \
        (ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES) +      \
         PERCPU_MODULE_RESERVE)
#endif

/*
 * Must be an lvalue.  Since @var must be a simple identifier,
 * we force a syntax error here if it isn't.
 */
#define get_cpu_var(var) (*({                           \
        extern int simple_identifier_##var(void);       \
        preempt_disable();                              \
        &__get_cpu_var(var); }))
#define put_cpu_var(var) preempt_enable()
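
/*
 * Example usage; an illustrative sketch, not part of this header (the
 * variable name "sample_count" is hypothetical):
 *
 *      DEFINE_PER_CPU(int, sample_count);
 *
 *      get_cpu_var(sample_count)++;    increments with preemption disabled
 *      put_cpu_var(sample_count);      re-enables preemption
 */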

#ifdef CONFIG_SMP

/* minimum unit size, also is the maximum supported allocation size */
#define PCPU_MIN_UNIT_SIZE              PFN_ALIGN(64 << 10)

/*
 * PERCPU_DYNAMIC_RESERVE indicates the amount of free area to piggy
 * back on the first chunk for dynamic percpu allocation if arch is
 * manually allocating and mapping it for faster access (as a part of
 * large page mapping for example).
 *
 * The following values give between one and two pages of free space
 * after typical minimal boot (2-way SMP, single disk and NIC) with
 * both defconfig and a distro config on x86_64 and 32-bit x86.  A
 * more intelligent way to determine this would be nice.
 */
#if BITS_PER_LONG > 32
#define PERCPU_DYNAMIC_RESERVE          (20 << 10)
#else
#define PERCPU_DYNAMIC_RESERVE          (12 << 10)
#endif

extern void *pcpu_base_addr;
extern const unsigned long *pcpu_unit_offsets;

struct pcpu_group_info {
        int                     nr_units;       /* aligned # of units */
        unsigned long           base_offset;    /* base address offset */
        unsigned int            *cpu_map;       /* unit->cpu map, empty
                                                 * entries contain NR_CPUS */
};

struct pcpu_alloc_info {
        size_t                  static_size;
        size_t                  reserved_size;
        size_t                  dyn_size;
        size_t                  unit_size;
        size_t                  atom_size;
        size_t                  alloc_size;
        size_t                  __ai_size;      /* internal, don't use */
        int                     nr_groups;      /* 0 if grouping unnecessary */
        struct pcpu_group_info  groups[];
};

enum pcpu_fc {
        PCPU_FC_AUTO,
        PCPU_FC_EMBED,
        PCPU_FC_PAGE,

        PCPU_FC_NR,
};
extern const char *pcpu_fc_names[PCPU_FC_NR];

extern enum pcpu_fc pcpu_chosen_fc;

typedef void * (*pcpu_fc_alloc_fn_t)(unsigned int cpu, size_t size,
                                     size_t align);
typedef void (*pcpu_fc_free_fn_t)(void *ptr, size_t size);
typedef void (*pcpu_fc_populate_pte_fn_t)(unsigned long addr);
typedef int (pcpu_fc_cpu_distance_fn_t)(unsigned int from, unsigned int to);

extern struct pcpu_alloc_info * __init pcpu_alloc_alloc_info(int nr_groups,
                                                             int nr_units);
extern void __init pcpu_free_alloc_info(struct pcpu_alloc_info *ai);

extern struct pcpu_alloc_info * __init pcpu_build_alloc_info(
                                size_t reserved_size, ssize_t dyn_size,
                                size_t atom_size,
                                pcpu_fc_cpu_distance_fn_t cpu_distance_fn);

extern int __init pcpu_setup_first_chunk(const struct pcpu_alloc_info *ai,
                                         void *base_addr);

#ifdef CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK
extern int __init pcpu_embed_first_chunk(size_t reserved_size, ssize_t dyn_size,
                                size_t atom_size,
                                pcpu_fc_cpu_distance_fn_t cpu_distance_fn,
                                pcpu_fc_alloc_fn_t alloc_fn,
                                pcpu_fc_free_fn_t free_fn);
#endif

#ifdef CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK
extern int __init pcpu_page_first_chunk(size_t reserved_size,
                                pcpu_fc_alloc_fn_t alloc_fn,
                                pcpu_fc_free_fn_t free_fn,
                                pcpu_fc_populate_pte_fn_t populate_pte_fn);
#endif

/*
 * Use this to get to a cpu's version of the dynamically allocated
 * per-cpu object.  Non-atomic access to the current CPU's version
 * should probably be combined with get_cpu()/put_cpu().
 */
#define per_cpu_ptr(ptr, cpu)   SHIFT_PERCPU_PTR((ptr), per_cpu_offset((cpu)))
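
/*
 * Example; a minimal sketch, not part of this header (the name
 * "counters" is hypothetical and assumed to come from alloc_percpu()):
 * summing every CPU's copy of a dynamically allocated counter.
 *
 *      unsigned long sum = 0;
 *      int cpu;
 *
 *      for_each_possible_cpu(cpu)
 *              sum += *per_cpu_ptr(counters, cpu);
 */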

extern void *__alloc_reserved_percpu(size_t size, size_t align);
extern void *__alloc_percpu(size_t size, size_t align);
extern void free_percpu(void *__pdata);
extern phys_addr_t per_cpu_ptr_to_phys(void *addr);

#ifndef CONFIG_HAVE_SETUP_PER_CPU_AREA
extern void __init setup_per_cpu_areas(void);
#endif

#else /* CONFIG_SMP */

#define per_cpu_ptr(ptr, cpu) ({ (void)(cpu); (ptr); })

static inline void *__alloc_percpu(size_t size, size_t align)
{
        /*
         * Can't easily make larger alignment work with kmalloc.  WARN
         * on it.  Larger alignment should only be used for module
         * percpu sections on SMP for which this path isn't used.
         */
        WARN_ON_ONCE(align > SMP_CACHE_BYTES);
        return kzalloc(size, GFP_KERNEL);
}

static inline void free_percpu(void *p)
{
        kfree(p);
}

static inline phys_addr_t per_cpu_ptr_to_phys(void *addr)
{
        return __pa(addr);
}

static inline void __init setup_per_cpu_areas(void) { }

static inline void *pcpu_lpage_remapped(void *kaddr)
{
        return NULL;
}

#endif /* CONFIG_SMP */

#define alloc_percpu(type)      \
        (typeof(type) *)__alloc_percpu(sizeof(type), __alignof__(type))
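
/*
 * Example; a minimal sketch, not part of this header (the names
 * "stats" and "struct my_stats" are hypothetical): the typical
 * lifecycle of a dynamically allocated percpu object.
 *
 *      struct my_stats { unsigned long packets, bytes; };
 *      struct my_stats *stats;
 *
 *      stats = alloc_percpu(struct my_stats);
 *      if (!stats)
 *              return -ENOMEM;
 *      ...
 *      free_percpu(stats);
 */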

/*
 * Optional methods for optimized non-lvalue per-cpu variable access.
 *
 * @var can be a percpu variable or a field of it and its size should
 * equal that of char, int or long.  percpu_read() evaluates to the
 * current value and all the others to void.
 *
 * These operations are guaranteed to be atomic w.r.t. preemption.
 * The generic versions use plain get/put_cpu_var().  Archs are
 * encouraged to implement single-instruction alternatives which don't
 * require preemption protection.
 */
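
/*
 * Example; a minimal sketch, not part of this header (the variable
 * name "nr_events" is hypothetical):
 *
 *      DEFINE_PER_CPU(long, nr_events);
 *
 *      percpu_add(nr_events, 1);       atomic w.r.t. preemption
 *      printk("%ld\n", percpu_read(nr_events));
 */
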
#ifndef percpu_read
# define percpu_read(var)                                               \
  ({                                                                    \
        typeof(per_cpu_var(var)) __tmp_var__;                           \
        __tmp_var__ = get_cpu_var(var);                                 \
        put_cpu_var(var);                                               \
        __tmp_var__;                                                    \
  })
#endif

#define __percpu_generic_to_op(var, val, op)                            \
do {                                                                    \
        get_cpu_var(var) op val;                                        \
        put_cpu_var(var);                                               \
} while (0)

#ifndef percpu_write
# define percpu_write(var, val)         __percpu_generic_to_op(var, (val), =)
#endif

#ifndef percpu_add
# define percpu_add(var, val)           __percpu_generic_to_op(var, (val), +=)
#endif

#ifndef percpu_sub
# define percpu_sub(var, val)           __percpu_generic_to_op(var, (val), -=)
#endif

#ifndef percpu_and
# define percpu_and(var, val)           __percpu_generic_to_op(var, (val), &=)
#endif

#ifndef percpu_or
# define percpu_or(var, val)            __percpu_generic_to_op(var, (val), |=)
#endif

#ifndef percpu_xor
# define percpu_xor(var, val)           __percpu_generic_to_op(var, (val), ^=)
#endif

/*
 * Branching helpers that dispatch an operation to one of a set of
 * size-specific functions, selected by the scalar size of the object
 * handled.
 */

extern void __bad_size_call_parameter(void);

#define __pcpu_size_call_return(stem, variable)                         \
({      typeof(variable) pscr_ret__;                                    \
        switch (sizeof(variable)) {                                     \
        case 1: pscr_ret__ = stem##1(variable); break;                  \
        case 2: pscr_ret__ = stem##2(variable); break;                  \
        case 4: pscr_ret__ = stem##4(variable); break;                  \
        case 8: pscr_ret__ = stem##8(variable); break;                  \
        default:                                                        \
                __bad_size_call_parameter(); break;                     \
        }                                                               \
        pscr_ret__;                                                     \
})

#define __pcpu_size_call(stem, variable, ...)                           \
do {                                                                    \
        switch (sizeof(variable)) {                                     \
        case 1: stem##1(variable, __VA_ARGS__); break;                  \
        case 2: stem##2(variable, __VA_ARGS__); break;                  \
        case 4: stem##4(variable, __VA_ARGS__); break;                  \
        case 8: stem##8(variable, __VA_ARGS__); break;                  \
        default:                                                        \
                __bad_size_call_parameter(); break;                     \
        }                                                               \
} while (0)
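
/*
 * Example; a minimal sketch, not part of this header: for a 4-byte
 * object the size switch selects the "_4" variant at compile time, so
 * with a hypothetical int percpu variable "x"
 *
 *      this_cpu_read(x)
 *
 * expands through __pcpu_size_call_return(this_cpu_read_, (x)) and
 * resolves to this_cpu_read_4(x).
 */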

/*
 * Optimized manipulation for memory allocated through the per cpu
 * allocator or for addresses of per cpu variables (these can be
 * determined using per_cpu_var(xx)).
 *
 * These operations guarantee exclusivity of access for other operations
 * on the *same* processor. The assumption is that per cpu data is only
 * accessed by a single processor instance (the current one).
 *
 * The first group is used for accesses that must be done in a
 * preemption safe way since we do not know that the context is
 * preempt safe. Interrupts may occur. If the interrupt modifies the
 * variable too then RMW actions will not be reliable.
 *
 * The arch code can provide optimized functions in two ways:
 *
 * 1. Override the function completely, e.g. define this_cpu_add().
 *    The arch must then ensure that the various scalar formats passed
 *    are handled correctly.
 *
 * 2. Provide functions for certain scalar sizes, e.g. provide
 *    this_cpu_add_2() to provide per cpu atomic operations for 2 byte
 *    sized RMW actions. If arch code does not provide operations for
 *    a scalar size then the fallback in the generic code will be
 *    used.
 */
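
/*
 * Example of option 2; a hypothetical arch-side sketch loosely modeled
 * on the x86 implementation, not part of this header: an architecture
 * with a single-instruction percpu add could provide the 4-byte
 * variant in its asm/percpu.h, and the size switch above would then
 * pick it over the generic preempt_disable()-based fallback.
 *
 *      #define this_cpu_add_4(pcp, val)                        \
 *              asm("addl %1, " __percpu_arg(0)                 \
 *                  : "+m" (pcp) : "ri" (val))
 */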

#define _this_cpu_generic_read(pcp)                                     \
({      typeof(pcp) ret__;                                              \
        preempt_disable();                                              \
        ret__ = *this_cpu_ptr(&(pcp));                                  \
        preempt_enable();                                               \
        ret__;                                                          \
})

#ifndef this_cpu_read
# ifndef this_cpu_read_1
#  define this_cpu_read_1(pcp)  _this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_2
#  define this_cpu_read_2(pcp)  _this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_4
#  define this_cpu_read_4(pcp)  _this_cpu_generic_read(pcp)
# endif
# ifndef this_cpu_read_8
#  define this_cpu_read_8(pcp)  _this_cpu_generic_read(pcp)
# endif
# define this_cpu_read(pcp)     __pcpu_size_call_return(this_cpu_read_, (pcp))
#endif

#define _this_cpu_generic_to_op(pcp, val, op)                           \
do {                                                                    \
        preempt_disable();                                              \
        *__this_cpu_ptr(&(pcp)) op val;                                 \
        preempt_enable();                                               \
} while (0)

#ifndef this_cpu_write
# ifndef this_cpu_write_1
#  define this_cpu_write_1(pcp, val)    _this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_2
#  define this_cpu_write_2(pcp, val)    _this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_4
#  define this_cpu_write_4(pcp, val)    _this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef this_cpu_write_8
#  define this_cpu_write_8(pcp, val)    _this_cpu_generic_to_op((pcp), (val), =)
# endif
# define this_cpu_write(pcp, val)       __pcpu_size_call(this_cpu_write_, (pcp), (val))
#endif

#ifndef this_cpu_add
# ifndef this_cpu_add_1
#  define this_cpu_add_1(pcp, val)      _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_2
#  define this_cpu_add_2(pcp, val)      _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_4
#  define this_cpu_add_4(pcp, val)      _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef this_cpu_add_8
#  define this_cpu_add_8(pcp, val)      _this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define this_cpu_add(pcp, val)         __pcpu_size_call(this_cpu_add_, (pcp), (val))
#endif

#ifndef this_cpu_sub
# define this_cpu_sub(pcp, val)         this_cpu_add((pcp), -(val))
#endif

#ifndef this_cpu_inc
# define this_cpu_inc(pcp)              this_cpu_add((pcp), 1)
#endif

#ifndef this_cpu_dec
# define this_cpu_dec(pcp)              this_cpu_sub((pcp), 1)
#endif

#ifndef this_cpu_and
# ifndef this_cpu_and_1
#  define this_cpu_and_1(pcp, val)      _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_2
#  define this_cpu_and_2(pcp, val)      _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_4
#  define this_cpu_and_4(pcp, val)      _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef this_cpu_and_8
#  define this_cpu_and_8(pcp, val)      _this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define this_cpu_and(pcp, val)         __pcpu_size_call(this_cpu_and_, (pcp), (val))
#endif

#ifndef this_cpu_or
# ifndef this_cpu_or_1
#  define this_cpu_or_1(pcp, val)       _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_2
#  define this_cpu_or_2(pcp, val)       _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_4
#  define this_cpu_or_4(pcp, val)       _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef this_cpu_or_8
#  define this_cpu_or_8(pcp, val)       _this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define this_cpu_or(pcp, val)          __pcpu_size_call(this_cpu_or_, (pcp), (val))
#endif

#ifndef this_cpu_xor
# ifndef this_cpu_xor_1
#  define this_cpu_xor_1(pcp, val)      _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_2
#  define this_cpu_xor_2(pcp, val)      _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_4
#  define this_cpu_xor_4(pcp, val)      _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef this_cpu_xor_8
#  define this_cpu_xor_8(pcp, val)      _this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define this_cpu_xor(pcp, val)         __pcpu_size_call(this_cpu_xor_, (pcp), (val))
#endif

/*
 * Generic percpu operations that do not require preemption handling.
 * Either we do not care about races or the caller has the
 * responsibility of handling preemption issues. Arch code can still
 * override these instructions since the arch per cpu code may be more
 * efficient and may actually get race freeness for free (that is the
 * case for x86 for example).
 *
 * If there is no other protection through preempt disable and/or
 * disabling interrupts then one of these RMW operations can show unexpected
 * behavior because the execution thread was rescheduled on another processor
 * or an interrupt occurred and the same percpu variable was modified from
 * the interrupt context.
 */
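
/*
 * Example; a minimal sketch, not part of this header (the variable
 * name "softirq_stat" is hypothetical): the __this_cpu variants are
 * safe here only because preemption is already disabled.
 *
 *      DEFINE_PER_CPU(unsigned long, softirq_stat);
 *
 *      preempt_disable();
 *      __this_cpu_inc(softirq_stat);
 *      preempt_enable();
 */
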
#ifndef __this_cpu_read
# ifndef __this_cpu_read_1
#  define __this_cpu_read_1(pcp)        (*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_2
#  define __this_cpu_read_2(pcp)        (*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_4
#  define __this_cpu_read_4(pcp)        (*__this_cpu_ptr(&(pcp)))
# endif
# ifndef __this_cpu_read_8
#  define __this_cpu_read_8(pcp)        (*__this_cpu_ptr(&(pcp)))
# endif
# define __this_cpu_read(pcp)   __pcpu_size_call_return(__this_cpu_read_, (pcp))
#endif

#define __this_cpu_generic_to_op(pcp, val, op)                          \
do {                                                                    \
        *__this_cpu_ptr(&(pcp)) op val;                                 \
} while (0)

#ifndef __this_cpu_write
# ifndef __this_cpu_write_1
#  define __this_cpu_write_1(pcp, val)  __this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_2
#  define __this_cpu_write_2(pcp, val)  __this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_4
#  define __this_cpu_write_4(pcp, val)  __this_cpu_generic_to_op((pcp), (val), =)
# endif
# ifndef __this_cpu_write_8
#  define __this_cpu_write_8(pcp, val)  __this_cpu_generic_to_op((pcp), (val), =)
# endif
# define __this_cpu_write(pcp, val)     __pcpu_size_call(__this_cpu_write_, (pcp), (val))
#endif

#ifndef __this_cpu_add
# ifndef __this_cpu_add_1
#  define __this_cpu_add_1(pcp, val)    __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_2
#  define __this_cpu_add_2(pcp, val)    __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_4
#  define __this_cpu_add_4(pcp, val)    __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef __this_cpu_add_8
#  define __this_cpu_add_8(pcp, val)    __this_cpu_generic_to_op((pcp), (val), +=)
# endif
# define __this_cpu_add(pcp, val)       __pcpu_size_call(__this_cpu_add_, (pcp), (val))
#endif

#ifndef __this_cpu_sub
# define __this_cpu_sub(pcp, val)       __this_cpu_add((pcp), -(val))
#endif

#ifndef __this_cpu_inc
# define __this_cpu_inc(pcp)            __this_cpu_add((pcp), 1)
#endif

#ifndef __this_cpu_dec
# define __this_cpu_dec(pcp)            __this_cpu_sub((pcp), 1)
#endif

#ifndef __this_cpu_and
# ifndef __this_cpu_and_1
#  define __this_cpu_and_1(pcp, val)    __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_2
#  define __this_cpu_and_2(pcp, val)    __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_4
#  define __this_cpu_and_4(pcp, val)    __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef __this_cpu_and_8
#  define __this_cpu_and_8(pcp, val)    __this_cpu_generic_to_op((pcp), (val), &=)
# endif
# define __this_cpu_and(pcp, val)       __pcpu_size_call(__this_cpu_and_, (pcp), (val))
#endif

#ifndef __this_cpu_or
# ifndef __this_cpu_or_1
#  define __this_cpu_or_1(pcp, val)     __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_2
#  define __this_cpu_or_2(pcp, val)     __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_4
#  define __this_cpu_or_4(pcp, val)     __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef __this_cpu_or_8
#  define __this_cpu_or_8(pcp, val)     __this_cpu_generic_to_op((pcp), (val), |=)
# endif
# define __this_cpu_or(pcp, val)        __pcpu_size_call(__this_cpu_or_, (pcp), (val))
#endif

#ifndef __this_cpu_xor
# ifndef __this_cpu_xor_1
#  define __this_cpu_xor_1(pcp, val)    __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_2
#  define __this_cpu_xor_2(pcp, val)    __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_4
#  define __this_cpu_xor_4(pcp, val)    __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef __this_cpu_xor_8
#  define __this_cpu_xor_8(pcp, val)    __this_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define __this_cpu_xor(pcp, val)       __pcpu_size_call(__this_cpu_xor_, (pcp), (val))
#endif

/*
 * IRQ safe versions of the per cpu RMW operations. Note that these operations
 * are *not* safe against modification of the same variable from another
 * processor (which one gets when using regular atomic operations).  They
 * are guaranteed to be atomic vs. local interrupts and preemption only.
 */
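
/*
 * Example; a minimal sketch, not part of this header (the variable
 * name "rx_dropped" is hypothetical): a counter updated from both
 * process context and an interrupt handler on the same CPU stays
 * consistent when the irqsafe variants are used.
 *
 *      DEFINE_PER_CPU(unsigned long, rx_dropped);
 *
 *      irqsafe_cpu_inc(rx_dropped);    safe even if an IRQ handler
 *                                      on this CPU does the same
 */
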
#define irqsafe_cpu_generic_to_op(pcp, val, op)                         \
do {                                                                    \
        unsigned long flags;                                            \
        local_irq_save(flags);                                          \
        *__this_cpu_ptr(&(pcp)) op val;                                 \
        local_irq_restore(flags);                                       \
} while (0)

#ifndef irqsafe_cpu_add
# ifndef irqsafe_cpu_add_1
#  define irqsafe_cpu_add_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_2
#  define irqsafe_cpu_add_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_4
#  define irqsafe_cpu_add_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# ifndef irqsafe_cpu_add_8
#  define irqsafe_cpu_add_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), +=)
# endif
# define irqsafe_cpu_add(pcp, val) __pcpu_size_call(irqsafe_cpu_add_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_sub
# define irqsafe_cpu_sub(pcp, val)      irqsafe_cpu_add((pcp), -(val))
#endif

#ifndef irqsafe_cpu_inc
# define irqsafe_cpu_inc(pcp)   irqsafe_cpu_add((pcp), 1)
#endif

#ifndef irqsafe_cpu_dec
# define irqsafe_cpu_dec(pcp)   irqsafe_cpu_sub((pcp), 1)
#endif

#ifndef irqsafe_cpu_and
# ifndef irqsafe_cpu_and_1
#  define irqsafe_cpu_and_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_2
#  define irqsafe_cpu_and_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_4
#  define irqsafe_cpu_and_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# ifndef irqsafe_cpu_and_8
#  define irqsafe_cpu_and_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), &=)
# endif
# define irqsafe_cpu_and(pcp, val) __pcpu_size_call(irqsafe_cpu_and_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_or
# ifndef irqsafe_cpu_or_1
#  define irqsafe_cpu_or_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_2
#  define irqsafe_cpu_or_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_4
#  define irqsafe_cpu_or_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# ifndef irqsafe_cpu_or_8
#  define irqsafe_cpu_or_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), |=)
# endif
# define irqsafe_cpu_or(pcp, val) __pcpu_size_call(irqsafe_cpu_or_, (pcp), (val))
#endif

#ifndef irqsafe_cpu_xor
# ifndef irqsafe_cpu_xor_1
#  define irqsafe_cpu_xor_1(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_2
#  define irqsafe_cpu_xor_2(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_4
#  define irqsafe_cpu_xor_4(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# ifndef irqsafe_cpu_xor_8
#  define irqsafe_cpu_xor_8(pcp, val) irqsafe_cpu_generic_to_op((pcp), (val), ^=)
# endif
# define irqsafe_cpu_xor(pcp, val) __pcpu_size_call(irqsafe_cpu_xor_, (pcp), (val))
#endif

#endif /* __LINUX_PERCPU_H */
