LCOV - code coverage report

Current view: top level - include/linux - mm_types.h (source / functions)
Test:         coverage.info
Date:         2017-01-25

              Hit    Total    Coverage
Lines:          6        6     100.0 %
Functions:      0        0           -

#ifndef _LINUX_MM_TYPES_H
#define _LINUX_MM_TYPES_H

#include <linux/auxvec.h>
#include <linux/types.h>
#include <linux/threads.h>
#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/prio_tree.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
#include <linux/completion.h>
#include <linux/cpumask.h>
#include <linux/page-debug-flags.h>
#include <asm/page.h>
#include <asm/mmu.h>

#ifndef AT_VECTOR_SIZE_ARCH
#define AT_VECTOR_SIZE_ARCH 0
#endif
#define AT_VECTOR_SIZE (2*(AT_VECTOR_SIZE_ARCH + AT_VECTOR_SIZE_BASE + 1))
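
/*
 * Worked example of the sizing arithmetic above, with an assumed value:
 * AT_VECTOR_SIZE_BASE is defined in <linux/auxvec.h>, not here.  Each
 * auxiliary-vector entry is an (id, value) pair of unsigned longs, so
 * if AT_VECTOR_SIZE_BASE were 20 and AT_VECTOR_SIZE_ARCH kept its
 * default of 0, then
 *
 *      AT_VECTOR_SIZE == 2 * (0 + 20 + 1) == 42
 *
 * which leaves room in saved_auxv[] below for every base and arch
 * entry plus the terminating AT_NULL pair.
 */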

struct address_space;

#define USE_SPLIT_PTLOCKS       (NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS)

#if USE_SPLIT_PTLOCKS
typedef atomic_long_t mm_counter_t;
#else  /* !USE_SPLIT_PTLOCKS */
typedef unsigned long mm_counter_t;
#endif /* !USE_SPLIT_PTLOCKS */
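
/*
 * Sketch of how the two variants are used (the real accessors are the
 * *_mm_counter() helpers in <linux/mm.h>; this is only an
 * illustration): with split page-table locks the RSS counters are
 * updated atomically, without taking page_table_lock,
 *
 *      atomic_long_add(1, &mm->_anon_rss);
 *
 * whereas the plain unsigned long variant is only modified while
 * holding mm->page_table_lock.
 */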

/*
 * Each physical page in the system has a struct page associated with
 * it to keep track of whatever it is we are using the page for at the
 * moment. Note that we have no way to track which tasks are using
 * a page, though if it is a pagecache page, rmap structures can tell us
 * who is mapping it.
 */
struct page {
        unsigned long flags;            /* Atomic flags, some possibly
                                         * updated asynchronously */
        atomic_t _count;                /* Usage count, see below. */
        union {
                atomic_t _mapcount;     /* Count of ptes mapped in mms,
                                         * to show when page is mapped
                                         * & limit reverse map searches.
                                         */
                struct {                /* SLUB */
                        u16 inuse;
                        u16 objects;
                };
        };
        union {
            struct {
                unsigned long private;          /* Mapping-private opaque data:
                                                 * usually used for buffer_heads
                                                 * if PagePrivate set; used for
                                                 * swp_entry_t if PageSwapCache;
                                                 * indicates order in the buddy
                                                 * system if PG_buddy is set.
                                                 */
                struct address_space *mapping;  /* If low bit clear, points to
                                                 * inode address_space, or NULL.
                                                 * If page mapped as anonymous
                                                 * memory, low bit is set, and
                                                 * it points to anon_vma object:
                                                 * see PAGE_MAPPING_ANON below.
                                                 */
            };
#if USE_SPLIT_PTLOCKS
            spinlock_t ptl;
#endif
            struct kmem_cache *slab;    /* SLUB: Pointer to slab */
            struct page *first_page;    /* Compound tail pages */
        };
        union {
                pgoff_t index;          /* Our offset within mapping. */
                void *freelist;         /* SLUB: freelist req. slab lock */
        };
        struct list_head lru;           /* Pageout list, eg. active_list
                                         * protected by zone->lru_lock !
                                         */
        /*
         * On machines where all RAM is mapped into kernel address space,
         * we can simply calculate the virtual address. On machines with
         * highmem some memory is mapped into kernel virtual memory
         * dynamically, so we need a place to store that address.
         * Note that this field could be 16 bits on x86 ... ;)
         *
         * Architectures with slow multiplication can define
         * WANT_PAGE_VIRTUAL in asm/page.h
         */
#if defined(WANT_PAGE_VIRTUAL)
        void *virtual;                  /* Kernel virtual address (NULL if
                                           not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
        unsigned long debug_flags;      /* Use atomic bitops on this */
#endif

#ifdef CONFIG_KMEMCHECK
        /*
         * kmemcheck wants to track the status of each byte in a page; this
         * is a pointer to such a status block. NULL if not tracked.
         */
        void *shadow;
#endif
};
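
/*
 * Illustrative decoding of page->mapping (mirrors PageAnon() and
 * PAGE_MAPPING_ANON in <linux/mm.h>):
 *
 *      if ((unsigned long)page->mapping & PAGE_MAPPING_ANON)
 *              anon_vma = (struct anon_vma *)
 *                      ((unsigned long)page->mapping & ~PAGE_MAPPING_ANON);
 *
 * i.e. the low bit tags the pointer as an anon_vma rather than an
 * address_space.
 */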

/*
 * A region containing a mapping of a non-memory backed file under NOMMU
 * conditions.  These are held in a global tree and are pinned by the VMAs that
 * map parts of them.
 */
struct vm_region {
        struct rb_node  vm_rb;          /* link in global region tree */
        unsigned long   vm_flags;       /* VMA vm_flags */
        unsigned long   vm_start;       /* start address of region */
        unsigned long   vm_end;         /* region initialised to here */
        unsigned long   vm_top;         /* region allocated to here */
        unsigned long   vm_pgoff;       /* the offset in vm_file corresponding to vm_start */
        struct file     *vm_file;       /* the backing file or NULL */

        int             vm_usage;       /* region usage count (access under nommu_region_sem) */
        bool            vm_icache_flushed : 1; /* true if the icache has been flushed for
                                                * this region */
};
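
/*
 * Sketch of the pinning protocol (nommu_region_sem lives in
 * mm/nommu.c; shown only to illustrate the locking rule stated above):
 *
 *      down_write(&nommu_region_sem);
 *      region->vm_usage++;          (one more VMA now maps this region)
 *      up_write(&nommu_region_sem);
 */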

/*
 * This struct defines a virtual memory area (a VMA). There is one of these
 * per VM-area/task.  A VM area is any part of the process virtual memory
 * space that has a special rule for the page-fault handlers (ie a shared
 * library, the executable area etc).
 */
struct vm_area_struct {
        struct mm_struct * vm_mm;       /* The address space we belong to. */
        unsigned long vm_start;         /* Our start address within vm_mm. */
        unsigned long vm_end;           /* The first byte after our end address
                                           within vm_mm. */

        /* linked list of VM areas per task, sorted by address */
        struct vm_area_struct *vm_next;

        pgprot_t vm_page_prot;          /* Access permissions of this VMA. */
        unsigned long vm_flags;         /* Flags, see mm.h. */

        struct rb_node vm_rb;

        /*
         * For areas with an address space and backing store,
         * linkage into the address_space->i_mmap prio tree, or
         * linkage to the list of like vmas hanging off its node, or
         * linkage of vma in the address_space->i_mmap_nonlinear list.
         */
        union {
                struct {
                        struct list_head list;
                        void *parent;   /* aligns with prio_tree_node parent */
                        struct vm_area_struct *head;
                } vm_set;

                struct raw_prio_tree_node prio_tree_node;
        } shared;

        /*
         * A file's MAP_PRIVATE vma can be in both i_mmap tree and anon_vma
         * list, after a COW of one of the file pages.  A MAP_SHARED vma
         * can only be in the i_mmap tree.  An anonymous MAP_PRIVATE, stack
         * or brk vma (with NULL file) can only be in an anon_vma list.
         */
        struct list_head anon_vma_node; /* Serialized by anon_vma->lock */
        struct anon_vma *anon_vma;      /* Serialized by page_table_lock */

        /* Function pointers to deal with this struct. */
        const struct vm_operations_struct *vm_ops;

        /* Information about our backing store: */
        unsigned long vm_pgoff;         /* Offset (within vm_file) in PAGE_SIZE
                                           units, *not* PAGE_CACHE_SIZE */
        struct file * vm_file;          /* File we map to (can be NULL). */
        void * vm_private_data;         /* was vm_pte (shared mem) */
        unsigned long vm_truncate_count;/* truncate_count or restart_addr */

#ifndef CONFIG_MMU
        struct vm_region *vm_region;    /* NOMMU mapping region */
#endif
#ifdef CONFIG_NUMA
        struct mempolicy *vm_policy;    /* NUMA policy for the VMA */
#endif
};
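
/*
 * Illustrative lookup using the fields above (the real helper is
 * find_vma() in mm/mmap.c, which walks the rb-tree; callers must hold
 * mm->mmap_sem at least for reading):
 *
 *      struct vm_area_struct *vma;
 *
 *      for (vma = mm->mmap; vma; vma = vma->vm_next)
 *              if (addr >= vma->vm_start && addr < vma->vm_end)
 *                      break;
 *
 * On exit, vma is the area containing addr, or NULL if none does
 * (vm_end is the first byte past the area, so the upper comparison is
 * strict).
 */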

struct core_thread {
        struct task_struct *task;
        struct core_thread *next;
};

struct core_state {
        atomic_t nr_threads;
        struct core_thread dumper;
        struct completion startup;
};

struct mm_struct {
        struct vm_area_struct * mmap;           /* list of VMAs */
        struct rb_root mm_rb;
        struct vm_area_struct * mmap_cache;     /* last find_vma result */
#ifdef CONFIG_MMU
        unsigned long (*get_unmapped_area) (struct file *filp,
                                unsigned long addr, unsigned long len,
                                unsigned long pgoff, unsigned long flags);
        void (*unmap_area) (struct mm_struct *mm, unsigned long addr);
#endif
        unsigned long mmap_base;                /* base of mmap area */
        unsigned long task_size;                /* size of task vm space */
        unsigned long cached_hole_size;         /* if non-zero, the largest hole below free_area_cache */
        unsigned long free_area_cache;          /* first hole of size cached_hole_size or larger */
        pgd_t * pgd;
        atomic_t mm_users;                      /* How many users with user space? */
        atomic_t mm_count;                      /* How many references to "struct mm_struct" (users count as 1) */
        int map_count;                          /* number of VMAs */
        struct rw_semaphore mmap_sem;
        spinlock_t page_table_lock;             /* Protects page tables and some counters */

        struct list_head mmlist;                /* List of maybe swapped mm's.  These are globally strung
                                                 * together off init_mm.mmlist, and are protected
                                                 * by mmlist_lock
                                                 */

        /* Special counters, in some configurations protected by the
         * page_table_lock, in other configurations by being atomic.
         */
        mm_counter_t _file_rss;
        mm_counter_t _anon_rss;

        unsigned long hiwater_rss;      /* High-watermark of RSS usage */
        unsigned long hiwater_vm;       /* High-water virtual memory usage */

        unsigned long total_vm, locked_vm, shared_vm, exec_vm;
        unsigned long stack_vm, reserved_vm, def_flags, nr_ptes;
        unsigned long start_code, end_code, start_data, end_data;
        unsigned long start_brk, brk, start_stack;
        unsigned long arg_start, arg_end, env_start, env_end;

        unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */

        struct linux_binfmt *binfmt;

        cpumask_t cpu_vm_mask;

        /* Architecture-specific MM context */
        mm_context_t context;

        /* Swap token stuff */
        /*
         * Last value of global fault stamp as seen by this process.
         * In other words, this value gives an indication of how long
         * it has been since this task got the token.
         * Look at mm/thrash.c
         */
        unsigned int faultstamp;
        unsigned int token_priority;
        unsigned int last_interval;

        unsigned long flags; /* Must use atomic bitops to access the bits */

        struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_AIO
        spinlock_t              ioctx_lock;
        struct hlist_head       ioctx_list;
#endif
#ifdef CONFIG_MM_OWNER
        /*
         * "owner" points to a task that is regarded as the canonical
         * user/owner of this mm. All of the following must be true in
         * order for it to be changed:
         *
         * current == mm->owner
         * current->mm != mm
         * new_owner->mm == mm
         * new_owner->alloc_lock is held
         */
        struct task_struct *owner;
#endif

#ifdef CONFIG_PROC_FS
        /* store ref to file /proc/<pid>/exe symlink points to */
        struct file *exe_file;
        unsigned long num_exe_file_vmas;
#endif
#ifdef CONFIG_MMU_NOTIFIER
        struct mmu_notifier_mm *mmu_notifier_mm;
#endif
};
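
/*
 * Sketch of the two-level reference counting above (mmput() and
 * mmdrop() are the real helpers, in kernel/fork.c):
 *
 *      atomic_inc(&mm->mm_users);   (another task shares the address space)
 *      ...
 *      mmput(mm);      drops mm_users; at zero, tears down the VMAs
 *                      and drops the single mm_count reference that
 *                      all users collectively hold
 *      mmdrop(mm);     drops mm_count; at zero, frees struct mm_struct
 *
 * Lazy-TLB kernel threads take only an mm_count reference, which is
 * why the struct can outlive its last user-space user.
 */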

/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
#define mm_cpumask(mm) (&(mm)->cpu_vm_mask)
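
/*
 * Typical use (this pattern appears in arch switch_mm()
 * implementations; shown only as a sketch):
 *
 *      cpumask_set_cpu(cpu, mm_cpumask(next));
 *
 * Going through the accessor instead of &mm->cpu_vm_mask directly
 * keeps callers working if the field's representation changes later.
 */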

#endif /* _LINUX_MM_TYPES_H */

Generated by: LCOV version 1.10